evalscope 0.12.0__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (85)
  1. evalscope/arguments.py +6 -1
  2. evalscope/benchmarks/aime/aime24_adapter.py +3 -3
  3. evalscope/benchmarks/aime/aime25_adapter.py +3 -3
  4. evalscope/benchmarks/arc/arc_adapter.py +15 -18
  5. evalscope/benchmarks/bbh/bbh_adapter.py +6 -6
  6. evalscope/benchmarks/benchmark.py +12 -11
  7. evalscope/benchmarks/ceval/ceval_adapter.py +12 -16
  8. evalscope/benchmarks/chinese_simple_qa/__init__.py +0 -0
  9. evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +168 -0
  10. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +13 -17
  11. evalscope/benchmarks/competition_math/competition_math_adapter.py +3 -3
  12. evalscope/benchmarks/data_adapter.py +59 -21
  13. evalscope/benchmarks/data_collection/data_collection_adapter.py +0 -1
  14. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +9 -12
  15. evalscope/benchmarks/general_qa/general_qa_adapter.py +30 -15
  16. evalscope/benchmarks/gpqa/gpqa_adapter.py +12 -7
  17. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +2 -3
  18. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +23 -31
  19. evalscope/benchmarks/humaneval/humaneval_adapter.py +10 -7
  20. evalscope/benchmarks/ifeval/ifeval_adapter.py +2 -3
  21. evalscope/benchmarks/iquiz/iquiz_adapter.py +9 -5
  22. evalscope/benchmarks/live_code_bench/__init__.py +0 -0
  23. evalscope/benchmarks/live_code_bench/evaluate_utils.py +193 -0
  24. evalscope/benchmarks/live_code_bench/execute_utils.py +267 -0
  25. evalscope/benchmarks/live_code_bench/extract_utils.py +70 -0
  26. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +90 -0
  27. evalscope/benchmarks/live_code_bench/load_utils.py +71 -0
  28. evalscope/benchmarks/live_code_bench/pass_k_utils.py +56 -0
  29. evalscope/benchmarks/live_code_bench/prompts.py +207 -0
  30. evalscope/benchmarks/live_code_bench/testing_util.py +721 -0
  31. evalscope/benchmarks/math_500/math_500_adapter.py +2 -6
  32. evalscope/benchmarks/mmlu/mmlu_adapter.py +13 -17
  33. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +9 -5
  34. evalscope/benchmarks/musr/musr_adapter.py +8 -5
  35. evalscope/benchmarks/process_bench/process_bench_adapter.py +8 -5
  36. evalscope/benchmarks/race/race_adapter.py +12 -16
  37. evalscope/benchmarks/simple_qa/__init__.py +0 -0
  38. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +167 -0
  39. evalscope/benchmarks/super_gpqa/__init__.py +0 -0
  40. evalscope/benchmarks/super_gpqa/five_shot_prompt.txt +89 -0
  41. evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +191 -0
  42. evalscope/benchmarks/super_gpqa/utils.py +85 -0
  43. evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +3 -0
  44. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +3 -4
  45. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +6 -13
  46. evalscope/benchmarks/utils.py +43 -0
  47. evalscope/collections/evaluator.py +14 -5
  48. evalscope/config.py +15 -2
  49. evalscope/constants.py +14 -0
  50. evalscope/evaluator/evaluator.py +51 -13
  51. evalscope/metrics/llm_judge.py +104 -0
  52. evalscope/metrics/named_metrics.py +1 -0
  53. evalscope/models/__init__.py +2 -1
  54. evalscope/models/base_adapter.py +25 -5
  55. evalscope/models/chat_adapter.py +3 -0
  56. evalscope/models/choice_adapter.py +4 -0
  57. evalscope/models/custom_adapter.py +2 -0
  58. evalscope/models/register.py +28 -0
  59. evalscope/models/server_adapter.py +35 -8
  60. evalscope/perf/arguments.py +13 -7
  61. evalscope/perf/benchmark.py +5 -0
  62. evalscope/perf/http_client.py +15 -5
  63. evalscope/perf/main.py +1 -0
  64. evalscope/perf/utils/analysis_result.py +1 -1
  65. evalscope/report/app.py +3 -0
  66. evalscope/report/combinator.py +2 -2
  67. evalscope/run.py +6 -5
  68. evalscope/third_party/longbench_write/infer.py +1 -1
  69. evalscope/third_party/thinkbench/eval.py +220 -55
  70. evalscope/third_party/thinkbench/infer.py +37 -7
  71. evalscope/third_party/thinkbench/tools/llm.py +1 -0
  72. evalscope/third_party/toolbench_static/llm/swift_infer.py +50 -20
  73. evalscope/utils/chat_service.py +1 -0
  74. evalscope/utils/filters.py +59 -0
  75. evalscope/utils/logger.py +3 -3
  76. evalscope/version.py +2 -2
  77. {evalscope-0.12.0.dist-info → evalscope-0.13.0.dist-info}/METADATA +31 -12
  78. {evalscope-0.12.0.dist-info → evalscope-0.13.0.dist-info}/RECORD +85 -62
  79. tests/cli/test_all.py +144 -0
  80. tests/cli/test_collection.py +28 -2
  81. tests/cli/test_run.py +201 -32
  82. {evalscope-0.12.0.dist-info → evalscope-0.13.0.dist-info}/LICENSE +0 -0
  83. {evalscope-0.12.0.dist-info → evalscope-0.13.0.dist-info}/WHEEL +0 -0
  84. {evalscope-0.12.0.dist-info → evalscope-0.13.0.dist-info}/entry_points.txt +0 -0
  85. {evalscope-0.12.0.dist-info → evalscope-0.13.0.dist-info}/top_level.txt +0 -0
evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py ADDED
@@ -0,0 +1,191 @@
+ import os
+ import random
+ import re
+
+ from evalscope.benchmarks import Benchmark, DataAdapter
+ from evalscope.constants import EvalType, OutputType
+ from evalscope.metrics import exact_match
+ from evalscope.utils import logger
+
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+
+ SUBSET_LIST = [
+     'Electronic Science and Technology', 'Philosophy', 'Traditional Chinese Medicine', 'Applied Economics',
+     'Mathematics', 'Physics', 'Clinical Medicine', 'Computer Science and Technology',
+     'Information and Communication Engineering', 'Control Science and Engineering', 'Theoretical Economics', 'Law',
+     'History', 'Basic Medicine', 'Education', 'Materials Science and Engineering', 'Electrical Engineering',
+     'Systems Science', 'Power Engineering and Engineering Thermophysics', 'Military Science', 'Biology',
+     'Business Administration', 'Language and Literature', 'Public Health and Preventive Medicine', 'Political Science',
+     'Chemistry', 'Hydraulic Engineering', 'Chemical Engineering and Technology', 'Pharmacy', 'Geography', 'Art Studies',
+     'Architecture', 'Forestry Engineering', 'Public Administration', 'Oceanography', 'Journalism and Communication',
+     'Nuclear Science and Technology', 'Weapon Science and Technology', 'Naval Architecture and Ocean Engineering',
+     'Environmental Science and Engineering', 'Transportation Engineering', 'Geology', 'Physical Oceanography',
+     'Musicology', 'Stomatology', 'Aquaculture', 'Mechanical Engineering',
+     'Aeronautical and Astronautical Science and Technology', 'Civil Engineering', 'Mechanics',
+     'Petroleum and Natural Gas Engineering', 'Sociology', 'Food Science and Engineering', 'Agricultural Engineering',
+     'Surveying and Mapping Science and Technology', 'Metallurgical Engineering',
+     'Library, Information and Archival Management', 'Mining Engineering', 'Astronomy',
+     'Geological Resources and Geological Engineering', 'Atmospheric Science', 'Optical Engineering', 'Animal Husbandry',
+     'Geophysics', 'Crop Science', 'Management Science and Engineering', 'Psychology', 'Forestry',
+     'Textile Science and Engineering', 'Veterinary Medicine', 'Instrument Science and Technology', 'Physical Education'
+ ]
+
+ SUBSET_MAPPING = {
+     'Electronic Science and Technology': ['Engineering'],
+     'Philosophy': ['Philosophy'],
+     'Traditional Chinese Medicine': ['Medicine'],
+     'Applied Economics': ['Economics'],
+     'Mathematics': ['Science'],
+     'Physics': ['Science'],
+     'Clinical Medicine': ['Medicine'],
+     'Computer Science and Technology': ['Engineering'],
+     'Information and Communication Engineering': ['Engineering'],
+     'Control Science and Engineering': ['Engineering'],
+     'Theoretical Economics': ['Economics'],
+     'Law': ['Law'],
+     'History': ['History'],
+     'Basic Medicine': ['Medicine'],
+     'Education': ['Education'],
+     'Materials Science and Engineering': ['Engineering'],
+     'Electrical Engineering': ['Engineering'],
+     'Systems Science': ['Science'],
+     'Power Engineering and Engineering Thermophysics': ['Engineering'],
+     'Military Science': ['Military Science'],
+     'Biology': ['Science'],
+     'Business Administration': ['Management'],
+     'Language and Literature': ['Literature and Arts'],
+     'Public Health and Preventive Medicine': ['Medicine'],
+     'Political Science': ['Law'],
+     'Chemistry': ['Science'],
+     'Hydraulic Engineering': ['Engineering'],
+     'Chemical Engineering and Technology': ['Engineering'],
+     'Pharmacy': ['Medicine'],
+     'Geography': ['Science'],
+     'Art Studies': ['Literature and Arts'],
+     'Architecture': ['Engineering'],
+     'Forestry Engineering': ['Engineering'],
+     'Public Administration': ['Management'],
+     'Oceanography': ['Science'],
+     'Journalism and Communication': ['Literature and Arts'],
+     'Nuclear Science and Technology': ['Engineering'],
+     'Weapon Science and Technology': ['Engineering'],
+     'Naval Architecture and Ocean Engineering': ['Engineering'],
+     'Environmental Science and Engineering': ['Engineering'],
+     'Transportation Engineering': ['Engineering'],
+     'Geology': ['Science'],
+     'Physical Oceanography': ['Science'],
+     'Musicology': ['Literature and Arts'],
+     'Stomatology': ['Medicine'],
+     'Aquaculture': ['Agronomy'],
+     'Mechanical Engineering': ['Engineering'],
+     'Aeronautical and Astronautical Science and Technology': ['Engineering'],
+     'Civil Engineering': ['Engineering'],
+     'Mechanics': ['Engineering'],
+     'Petroleum and Natural Gas Engineering': ['Engineering'],
+     'Sociology': ['Sociology'],
+     'Food Science and Engineering': ['Engineering'],
+     'Agricultural Engineering': ['Engineering'],
+     'Surveying and Mapping Science and Technology': ['Engineering'],
+     'Metallurgical Engineering': ['Engineering'],
+     'Library, Information and Archival Management': ['Management'],
+     'Mining Engineering': ['Engineering'],
+     'Astronomy': ['Science'],
+     'Geological Resources and Geological Engineering': ['Engineering'],
+     'Atmospheric Science': ['Science'],
+     'Optical Engineering': ['Engineering'],
+     'Animal Husbandry': ['Agronomy'],
+     'Geophysics': ['Science'],
+     'Crop Science': ['Agronomy'],
+     'Management Science and Engineering': ['Management'],
+     'Psychology': ['Education'],
+     'Forestry': ['Agronomy'],
+     'Textile Science and Engineering': ['Engineering'],
+     'Veterinary Medicine': ['Agronomy'],
+     'Instrument Science and Technology': ['Engineering'],
+     'Physical Education': ['Education']
+ }
+
+
+ @Benchmark.register(
+     name='super_gpqa',
+     pretty_name='SuperGPQA',
+     dataset_id='m-a-p/SuperGPQA',
+     model_adapter=OutputType.GENERATION,
+     output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
+     subset_list=SUBSET_LIST,
+     metric_list=['AverageAccuracy'],
+     few_shot_num=0,
+     train_split=None,
+     eval_split='train',  # only have train split
+ )
+ class SuperGPQAAdapter(DataAdapter):
+
+     def __init__(self, **kwargs):
+         few_shot_num = kwargs.get('few_shot_num', 0)
+         if few_shot_num > 0 and few_shot_num != 5:
+             logger.warning(
+                 f'Only support few_shot_num 0 or 5 for SuperGPQA, but got {few_shot_num}. Use 5-shot by default.')
+             kwargs['few_shot_num'] = 5
+         super().__init__(**kwargs)
+
+         self.choices = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']
+         self.category_map = SUBSET_MAPPING
+         self.few_shot_prompt = open(os.path.join(current_dir, 'five_shot_prompt.txt'), encoding='utf-8').read()
+         self.zero_shot_prompt = open(os.path.join(current_dir, 'zero_shot_prompt.txt'), encoding='utf-8').read()
+
+     def load(self, **kwargs):
+         kwargs['subset_list'] = ['default']
+         data_dict = super().load(**kwargs)
+         return self.reformat_subset(data_dict, subset_key='field', format='{}')
+
+     def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs) -> dict:
+         if not self.prompt_template:
+             if few_shot_list:
+                 prompt = self.few_shot_prompt.format(query=input_d['question'])
+             else:
+                 prompt = self.zero_shot_prompt.format(query=input_d['question'])
+         else:
+             prompt = self.prompt_template.format(query=input_d['question'])
+         return self.gen_prompt_data(prompt)
+
+     def get_gold_answer(self, input_d: dict) -> str:
+         # Get the gold choice
+         return input_d.get('answer_letter')
+
+     def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = EvalType.CHECKPOINT) -> str:
+         """
+         Parse the model output to get the answer. Could be the best choice index.
+
+         Args:
+             result: Predicted answer from the model. Usually a string for chat.
+             raw_input_d: The raw input. Depending on the dataset.
+             eval_type: 'checkpoint' or 'service' or 'custom'
+
+         Returns:
+             The parsed answer. Depending on the dataset. Usually a string for chat.
+         """
+         if self.model_adapter == OutputType.MULTIPLE_CHOICE:
+             return result
+         else:
+             from evalscope.benchmarks.super_gpqa.utils import extract_option_content, extract_option_labels
+             sample = raw_input_d
+             if self.few_shot_num == 0:
+                 predict = extract_option_labels(result, 'ABCDEFGHIJ')
+                 if predict is None:
+                     predict = extract_option_content(result, sample['options'])
+                     predict = chr(sample['options'].index(predict) + 65) if predict else None
+             else:
+                 response = result.split('Question:')[0]
+                 predict = extract_option_labels(response, 'ABCDEFGHIJ')
+                 if predict is None:
+                     predict = extract_option_content(response, sample['options'])
+                     predict = chr(sample['options'].index(predict) + 65) if predict else None
+                 if predict is None:
+                     predict = extract_option_labels(result, 'ABCDEFGHIJ')
+                     if predict is None:
+                         predict = extract_option_content(result, sample['options'])
+                         predict = chr(sample['options'].index(predict) + 65) if predict else None
+         return predict
+
+     def match(self, gold: str, pred: str) -> float:
+         return exact_match(gold=gold, pred=pred)
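
For orientation (not part of the package diff): a minimal sketch of invoking the newly registered benchmark through evalscope's usual TaskConfig/run_task entry points. The model name, endpoint and API key below are placeholders.

    from evalscope import TaskConfig, run_task

    task_cfg = TaskConfig(
        model='qwen2.5-7b-instruct',          # placeholder model identifier
        api_url='http://127.0.0.1:8801/v1',   # placeholder OpenAI-compatible endpoint
        api_key='EMPTY',
        eval_type='service',                  # EvalType.SERVICE
        datasets=['super_gpqa'],              # name registered by @Benchmark.register above
        limit=10,                             # small smoke test
    )
    run_task(task_cfg=task_cfg)
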
evalscope/benchmarks/super_gpqa/utils.py ADDED
@@ -0,0 +1,85 @@
+ # flake8: noqa
+ import re
+
+
+ def safe_regex_search(pattern, text, flags=0):
+     try:
+         return re.search(pattern, text, flags)
+     except Exception as e:
+         print(f'Regex match error: {str(e)}')
+         return None
+
+
+ def extract_option_labels(text, options='ABCDEFGHIJ'):
+     if not isinstance(text, str) or not isinstance(options, str):
+         return 'error'
+
+     text = text.rstrip()
+     last_line = text.split('\n')[-1]
+
+     option_str = ''.join([chr(65 + i) for i in range(len(options))]) if options else 'ABCDEFGHIJ'
+
+     patterns = [
+         # e.g. "The final answer to this question is: A."
+         #      "The best option is $\boxed{B}:"
+         #      "The correct answer is (C)."
+         f'[Tt]he\s+(?:\w+\s+)?(?:answer|option)(?:\w+\s+)?\s+is?:?\s*(?:[\*\$\\{{(\[\\\\(]*?(?:(?:\\\\boxed|\\\\mathbf|\\\\mathrm|\\\\text){{)?)*\s*([{option_str}])(?:\\\\?\}}?\$?\)?\]?\}}?)*(?:[\s:\.\*)]|$)',
+
+         # e.g. "ANSWER: A"
+         #      "Answer: $\boxed{B}."
+         #      "ANSWER: (C):"
+         f'(?i:Answer)[\*\s]*:\s*(?:[\*\$\\{{(\[\\\\(]*?(?:(?:\\\\boxed|\\\\mathbf|\\\\mathrm|\\\\text){{)?)*\s*([{option_str}])(?:\\\\?\}}?\$?\)?\]?\}}?)*(?:[\s:\.\*)]|$)',
+
+         # e.g. "A"
+         #      "$\boxed{B}$"
+         #      "(C)."
+         #      "[D]:"
+         f'^[^\w\r\n]*(?:[\*\$\\{{(\[\\\\(]*?(?:(?:\\\\boxed|\\\\mathbf|\\\\mathrm|\\\\text){{)?)*\s*([{option_str}])(?:\\\\?\}}?\$?\)?\]?\}}?)*(?:[\s:\.\*)]|$)',
+     ]
+
+     for pattern in patterns:
+         match = safe_regex_search(pattern, last_line, re.IGNORECASE)
+         if match:
+             return match.group(1)
+
+     for pattern in patterns:
+         match = safe_regex_search(pattern, text, re.IGNORECASE)
+         if match:
+             return match.group(1)
+
+     return None
+
+
+ def extract_option_content(text, options_content=None):
+     if not isinstance(text, str) or not isinstance(options_content, list):
+         return 'error'
+
+     escaped_options_content = [re.escape(option_content) for option_content in options_content]
+     escaped_options_content_str = '|'.join(escaped_options_content)
+
+     text = text.rstrip()
+     last_line = text.split('\n')[-1]
+
+     patterns = [
+         f'[Tt]he\s+(?:\w+\s+)?(?:answer|option)(?:\w+\s+)?\s+is:?\s*(?:[\*\$\\{{\(\[\\\\(]*?(?:(?:\\\\boxed|\\\\mathbf|\\\\mathrm|\\\\text){{)?)*\s*({escaped_options_content_str})(?:\\\\?\}}?\$?\)?\]?\}}?)*(?:[\s:\.\*)]|$)',
+         f'(?i:Answer)\s*(?:[\*\$\\{{\(\[\\\\(]*?(?:(?:\\\\boxed|\\\\mathbf|\\\\mathrm|\\\\text){{)?)*\s*({escaped_options_content_str})(?:\\\\?\}}?\$?\)?\]?\}}?)*(?:[\s:\.\*)]|$)',
+         f'^[^\w\r\n]*(?:[\*\$\\{{\(\[\\\\(]*?(?:(?:\\\\boxed|\\\\mathbf|\\\\mathrm|\\\\text){{)?)*\s*({escaped_options_content_str})(?:\\\\?\}}?\$?\)?\]?\}}?)*(?:[\s:\.\*)]|$)',
+     ]
+
+     for pattern in patterns:
+         match = safe_regex_search(pattern, last_line)
+         if match:
+             if match.group(1) in escaped_options_content:
+                 return options_content[escaped_options_content.index(match.group(1))]
+             else:
+                 return match.group(1)
+
+     for pattern in patterns:
+         match = safe_regex_search(pattern, text)
+         if match:
+             if match.group(1) in escaped_options_content:
+                 return options_content[escaped_options_content.index(match.group(1))]
+             else:
+                 return match.group(1)
+
+     return None
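
A quick, hedged sanity check of the extractors above; the expected values simply mirror the example strings quoted in the pattern comments and are not an independent test suite.

    from evalscope.benchmarks.super_gpqa.utils import extract_option_labels

    print(extract_option_labels('The correct answer is (C).'))   # expected: 'C'
    print(extract_option_labels('ANSWER: A'))                    # expected: 'A'
    print(extract_option_labels(r'Answer: $\boxed{B}.'))         # expected: 'B'
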
evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt ADDED
@@ -0,0 +1,3 @@
+ Answer the following multiple choice question. There is only one correct answer. The last line of your response should be in the format 'Answer: $LETTER' (without quotes), where LETTER is one of A, B, C, D, E, F, G, H, I, or J.
+
+ {query}
evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py CHANGED
@@ -5,8 +5,7 @@ import os
  
   from evalscope.benchmarks import Benchmark
   from evalscope.benchmarks.data_adapter import DataAdapter
- from evalscope.constants import EvalType
- from evalscope.models import ChatGenerationModelAdapter
+ from evalscope.constants import EvalType, OutputType
   from evalscope.utils import get_logger
  
   # flake8: noqa
@@ -16,8 +15,8 @@ logger = get_logger()
  
   @Benchmark.register(
       name='trivia_qa',
+     pretty_name='TriviaQA',
       dataset_id='modelscope/trivia_qa',
-     model_adapter=ChatGenerationModelAdapter,
       subset_list=['default'],
       metric_list=['AverageAccuracy'],
       few_shot_num=5,
@@ -100,7 +99,7 @@ class TriviaQaAdapter(DataAdapter):
           context += self._generate_prompt(input_d=input_d, include_answer=False)
           full_prompt = context
  
-         return {'data': [full_prompt], 'system_prompt': prompt or self.prompt_template}
+         return self.gen_prompt_data(full_prompt)
  
       def get_gold_answer(self, input_d: dict) -> list:
           # Get the gold choice
evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py CHANGED
@@ -8,8 +8,7 @@ from typing import List
  
   from evalscope.benchmarks import Benchmark
   from evalscope.benchmarks.data_adapter import DataAdapter
- from evalscope.constants import EvalType
- from evalscope.models import ContinuationLogitsModelAdapter
+ from evalscope.constants import EvalType, OutputType
   from evalscope.utils import get_logger
  
   # flake8: noqa
@@ -21,8 +20,10 @@ logger = get_logger()
  
   @Benchmark.register(
       name='truthful_qa',
+     pretty_name='TruthfulQA',
       dataset_id='modelscope/truthful_qa',
-     model_adapter=ContinuationLogitsModelAdapter,
+     model_adapter=OutputType.CONTINUOUS,
+     output_types=[OutputType.CONTINUOUS, OutputType.GENERATION],
       subset_list=['multiple_choice'],
       metric_list=['AverageAccuracy'],
       few_shot_num=0,
@@ -195,8 +196,7 @@ class TruthfulQaAdapter(DataAdapter):
           else:
               raise ValueError(f'** Unknown subset_name: {subset_name}')
  
-         prompt_d = {'data': ctx_continuation_pair_list}
-         return prompt_d
+         return self.gen_prompt_data(ctx_continuation_pair_list)
  
       def get_gold_answer(self, input_d: dict) -> dict:
           # Get the gold choice
@@ -215,14 +215,7 @@ class TruthfulQaAdapter(DataAdapter):
           Returns:
               The predicted answer.
           """
-         if eval_type == EvalType.CHECKPOINT:
-             return result
-         elif eval_type == EvalType.SERVICE:  # TODO: to be supported !
-             return result
-         elif eval_type == EvalType.CUSTOM:  # TODO: to be supported !
-             return result
-         else:
-             raise ValueError(f'Invalid eval_type: {eval_type}')
+         return result
  
       def match(self, gold: dict, pred: list) -> dict:
           """
evalscope/benchmarks/utils.py ADDED
@@ -0,0 +1,43 @@
+ from dataclasses import dataclass
+ from functools import wraps
+ from typing import Dict, List, Optional
+
+ from evalscope.constants import EvalType
+ from evalscope.utils.filters import Filter
+
+
+ @dataclass
+ class PromptData:
+     data: List[str]
+     index: Optional[int] = 0
+     system_prompt: Optional[str] = None
+     multi_choices: Optional[List[str]] = None
+
+     def to_dict(self) -> Dict:
+         if self.multi_choices is None:
+             return {
+                 'data': self.data,
+                 'index': self.index,
+                 'system_prompt': self.system_prompt,
+             }
+         else:
+             return {
+                 'data': self.data,
+                 'index': self.index,
+                 'system_prompt': self.system_prompt,
+                 'multi_choices': self.multi_choices,
+             }
+
+
+ def preprocess_decorator(func):
+
+     @wraps(func)
+     def wrapper(self, result: str, raw_input_d: dict = None, eval_type: str = EvalType.CHECKPOINT):
+         filters = self.config_kwargs.get('filters', None)
+         if filters:
+             # Apply filters to the result
+             for filter_name, filter_value in filters.items():
+                 result = Filter.apply(filter_name, result, filter_value)
+         return func(self, result, raw_input_d, eval_type)
+
+     return wrapper
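
PromptData is the structure that the adapters' new gen_prompt_data() calls appear to build (the supporting data_adapter.py changes are not shown in full here); a small, directly runnable illustration:

    from evalscope.benchmarks.utils import PromptData

    pd = PromptData(data=['What is 2 + 2?'], system_prompt='You are a helpful assistant.')
    print(pd.to_dict())
    # {'data': ['What is 2 + 2?'], 'index': 0, 'system_prompt': 'You are a helpful assistant.'}

The preprocess_decorator behaves the same way for every adapter: when a benchmark is registered with a filters mapping, each filter is applied to the raw model output via Filter.apply before parse_pred_result sees it.
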
evalscope/collections/evaluator.py CHANGED
@@ -12,7 +12,7 @@ from evalscope.collections.sampler import DatasetEntry
   from evalscope.config import TaskConfig
   from evalscope.constants import AnswerKeys, DumpMode, EvalType
   from evalscope.evaluator import Evaluator
- from evalscope.models import get_local_model, initialize_model_adapter
+ from evalscope.models import initialize_model_adapter
   from evalscope.report import ReportGenerator
   from evalscope.utils.io_utils import OutputsStructure, dump_jsonl_data, jsonl_to_list
   from evalscope.utils.logger import get_logger
@@ -53,11 +53,11 @@ class SimpleEvaluator(Evaluator):
  
   class EvaluatorCollection:
  
-     def __init__(self, task_cfg: TaskConfig, data_adapter: DataAdapter, outputs: OutputsStructure):
+     def __init__(self, task_cfg: TaskConfig, data_adapter: DataAdapter, outputs: OutputsStructure, base_model):
           self.task_cfg = task_cfg
           self.data_adapter = data_adapter
           self.outputs = outputs
-         self.model = get_local_model(task_cfg)
+         self.model = base_model
  
           self.dataset, self.dataset_name = self.load()
           self.dataset_name_map = EvaluatorCollection._init_name_map(self.dataset)
@@ -97,8 +97,8 @@ class EvaluatorCollection:
           evaluators = {}
           for dataset_name in self.dataset_name_map.keys():
               benchmark = Benchmark.get(dataset_name)
+             model_adapter = initialize_model_adapter(self.task_cfg, benchmark, self.model)
               data_adapter = benchmark.get_data_adapter()
-             model_adapter = initialize_model_adapter(self.task_cfg, benchmark.model_adapter, self.model)
               evaluators[dataset_name] = SimpleEvaluator(dataset_name, data_adapter, model_adapter, self.task_cfg,
                                                          self.outputs)
           return evaluators
@@ -238,7 +238,16 @@ class EvaluatorCollection:
           if self.task_cfg.use_cache and os.path.exists(review_file_path):
               logger.warning(
                   f'Ignore use_cache={self.task_cfg.use_cache}, updating the review file: {review_file_path} ...')
-             os.remove(review_file_path)
+             if os.path.isdir(review_file_path):
+                 for filename in os.listdir(review_file_path):
+                     file_path = os.path.join(review_file_path, filename)
+                     try:
+                         if os.path.isfile(file_path):
+                             os.remove(file_path)
+                     except Exception as e:
+                         logger.error(f'Error deleting file {file_path}: {e}')
+             else:
+                 os.remove(review_file_path)
  
           reviews = defaultdict(dict)
           for sample in tqdm(self.dataset, desc='Getting reviews'):
evalscope/config.py CHANGED
@@ -4,10 +4,12 @@ import copy
   import json
   import os
   from argparse import Namespace
+ from collections import OrderedDict
   from dataclasses import dataclass, field
   from typing import Dict, List, Optional, Union
  
- from evalscope.constants import DEFAULT_DATASET_CACHE_DIR, DEFAULT_WORK_DIR, EvalBackend, EvalStage, EvalType, HubType
+ from evalscope.constants import (DEFAULT_DATASET_CACHE_DIR, DEFAULT_WORK_DIR, EvalBackend, EvalStage, EvalType, HubType,
+                                  JudgeStrategy, OutputType)
   from evalscope.models.custom import CustomModel
   from evalscope.utils import gen_hash
   from evalscope.utils.io_utils import dict_to_yaml, json_to_dict, yaml_to_dict
@@ -54,7 +56,7 @@ class TaskConfig:
       eval_config: Union[str, Dict, None] = None
       stage: str = EvalStage.ALL
       limit: Optional[int] = None
-     eval_batch_size: int = 1
+     eval_batch_size: Optional[int] = None
  
       # Cache and working directory arguments
       mem_cache: bool = False  # Deprecated, will be removed in v1.0.0.
@@ -71,12 +73,23 @@ class TaskConfig:
       timeout: Optional[float] = None  # Only used for server model
       stream: bool = False  # Only used for server model
  
+     # LLMJudge arguments
+     judge_strategy: str = JudgeStrategy.AUTO
+     judge_worker_num: int = 8
+     judge_model_args: Optional[Dict] = field(default_factory=lambda: {})
+
       def __post_init__(self):
           if (not self.model_id) and self.model:
               if isinstance(self.model, CustomModel):
                   self.model_id = type(self.model).__name__
               else:
                   self.model_id = os.path.basename(self.model).rstrip(os.sep)
+             # fix path error, see http://github.com/modelscope/evalscope/issues/377
+             self.model_id = self.model_id.replace(':', '-')
+
+         # Set default eval_batch_size based on eval_type
+         if self.eval_batch_size is None:
+             self.eval_batch_size = 8 if self.eval_type == EvalType.SERVICE else 1
  
       def to_dict(self):
           return self.__dict__
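
The two __post_init__ additions are easy to check in isolation; this sketch assumes the remaining TaskConfig fields keep their defaults:

    from evalscope.config import TaskConfig
    from evalscope.constants import EvalType

    cfg = TaskConfig(model='qwen2.5:7b', eval_type=EvalType.SERVICE)
    print(cfg.model_id)         # 'qwen2.5-7b' -- ':' replaced to avoid path errors (issue 377)
    print(cfg.eval_batch_size)  # 8 for service evaluation, 1 otherwise
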
evalscope/constants.py CHANGED
@@ -139,6 +139,13 @@ class EvalType:
       SERVICE = 'service'  # model service
  
  
+ class OutputType:
+     LOGITS = 'logits'  # for multiple choice tasks
+     GENERATION = 'generation'  # for text generation tasks and general tasks
+     MULTIPLE_CHOICE = 'multiple_choice_logits'  # for multiple choice tasks
+     CONTINUOUS = 'continuous_logits'  # for continuous tasks
+
+
   class EvalBackend:
       NATIVE = 'Native'
       OPEN_COMPASS = 'OpenCompass'
@@ -149,3 +156,10 @@ class EvalBackend:
  
   class DataCollection:
       NAME = 'data_collection'
+
+
+ class JudgeStrategy:
+     AUTO = 'auto'
+     RULE = 'rule'
+     LLM = 'llm'
+     LLM_RECALL = 'llm_recall'
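
A sketch of wiring the new judge options together (not part of the diff). The judge_model_args keys shown are assumptions: they are forwarded verbatim to LLMJudge(**...), whose signature lives in the newly added evalscope/metrics/llm_judge.py and is not reproduced in this diff.

    from evalscope import TaskConfig, run_task
    from evalscope.constants import JudgeStrategy

    task_cfg = TaskConfig(
        model='qwen2.5-7b-instruct',          # placeholder model under test
        datasets=['simple_qa'],               # a benchmark added in this release
        judge_strategy=JudgeStrategy.LLM,     # RULE, LLM, LLM_RECALL, or the default AUTO
        judge_worker_num=8,                   # threads used by the review ThreadPoolExecutor
        judge_model_args={                    # assumed keys, forwarded to LLMJudge(**...)
            'model_id': 'qwen2.5-72b-instruct',
            'api_url': 'http://127.0.0.1:8801/v1',
            'api_key': 'EMPTY',
        },
    )
    run_task(task_cfg=task_cfg)
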
evalscope/evaluator/evaluator.py CHANGED
@@ -11,7 +11,7 @@ from typing import Any, Dict, List, Optional, Union
  
   from evalscope.benchmarks import DataAdapter
   from evalscope.config import TaskConfig
- from evalscope.constants import AnswerKeys, DumpMode, EvalStage, EvalType, ReviewKeys
+ from evalscope.constants import AnswerKeys, DumpMode, EvalStage, EvalType, JudgeStrategy, ReviewKeys
   from evalscope.models import BaseModelAdapter
   from evalscope.report import Report, gen_table
   from evalscope.utils import dict_torch_dtype_to_str, gen_hash
@@ -58,9 +58,17 @@ class Evaluator(object):
           self.task_cfg = task_cfg
           # Deal with the output paths
           self.outputs_structure = outputs
-
           self.kwargs = kwargs
  
+         self._init_judge()
+
+     def _init_judge(self):
+         if self.task_cfg.judge_strategy == JudgeStrategy.RULE:
+             self.judge = None
+         else:
+             from evalscope.metrics.llm_judge import LLMJudge
+             self.judge = LLMJudge(**self.task_cfg.judge_model_args)
+
       def load_dataset(self):
           dataset = self.data_adapter.load(
               work_dir=os.path.expanduser(self.task_cfg.dataset_dir), datasets_hub=self.dataset_hub, **self.kwargs)
@@ -200,17 +208,40 @@
           for choice in choices:
               raw_input_d: dict = review_res[AnswerKeys.RAW_INPUT]
               answer_content = choice[ReviewKeys.MESSAGE][ReviewKeys.CONTENT]
-             answer_content = self.data_adapter.parse_pred_result(
-                 result=answer_content, raw_input_d=raw_input_d, eval_type=self.eval_type)
               gold_content = self.data_adapter.get_gold_answer(raw_input_d)
  
-             review_result = self.data_adapter.match(gold_content, answer_content)
+             # Get review result based on judge strategy
+             use_llm = (
+                 self.task_cfg.judge_strategy == JudgeStrategy.LLM
+                 or (self.task_cfg.judge_strategy == JudgeStrategy.AUTO and self.data_adapter.llm_as_a_judge))
+
+             if use_llm:
+                 # Use LLM as judge
+                 assert self.judge is not None, f'Judge model is required for LLM judging {self.data_adapter.name}'
+                 review_result = self.data_adapter.llm_match(
+                     gold_content, answer_content, self.judge, raw_input=raw_input_d)
+                 pred = answer_content
+             else:
+                 # Use rule-based judging
+                 pred_content = self.data_adapter.parse_pred_result(
+                     result=answer_content, raw_input_d=raw_input_d, eval_type=self.eval_type)
+                 review_result = self.data_adapter.match(gold_content, pred_content)
+
+                 # For LLM_RECALL strategy, use LLM to re-judge if rule-based result is not good
+                 if (self.task_cfg.judge_strategy == JudgeStrategy.LLM_RECALL
+                         and isinstance(review_result, (bool, int, float)) and not bool(review_result)):
+                     assert self.judge is not None, f'Judge model is required for LLM_RECALL strategy {self.data_adapter.name}'  # noqa: E501
+                     review_result = self.data_adapter.llm_match(
+                         gold_content, answer_content, self.judge, raw_input=raw_input_d)
+                     pred = answer_content
+                 else:
+                     pred = pred_content
+
               choice[ReviewKeys.REVIEW] = {
                   ReviewKeys.GOLD: gold_content,
-                 ReviewKeys.PRED: answer_content,
+                 ReviewKeys.PRED: pred,
                   ReviewKeys.RESULT: review_result
               }
-
               rev_choices.append(choice)
  
           review_res[AnswerKeys.CHOICES] = rev_choices
@@ -252,16 +283,23 @@
               logger.warning(f'Ignore use_cache={self.use_cache}, updating the review file: {review_file_path} ...')
               os.remove(review_file_path)
  
-         for answer_d in tqdm(answers_list, total=len(answers_list), desc=f'Reviewing({subset_name}): '):
+         def process_single_review(answer_d):
               review_id, reviewer_spec = self._generate_review_id(answer_d)
               # Get review
               review_d = self._get_review(answer_d=answer_d, review_id=review_id, reviewer_spec=reviewer_spec)
-
               logger.debug(review_d)
-
-             reviews_list.append(review_d)
-             # Dump reviews
-             dump_jsonl_data(review_d, review_file_path, dump_mode=DumpMode.APPEND)
+             return review_d
+
+         with ThreadPoolExecutor(max_workers=self.task_cfg.judge_worker_num) as executor:
+             # Submit all tasks and get futures
+             futures = [executor.submit(process_single_review, answer_d) for answer_d in answers_list]
+
+             # Process completed futures with progress bar
+             for future in tqdm(as_completed(futures), total=len(futures), desc=f'Reviewing({subset_name}): '):
+                 review_d = future.result()
+                 reviews_list.append(review_d)
+                 # Dump reviews
+                 dump_jsonl_data(review_d, review_file_path, dump_mode=DumpMode.APPEND)
  
           return reviews_list