evalscope 0.12.1__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (43)
  1. evalscope/arguments.py +6 -1
  2. evalscope/benchmarks/arc/arc_adapter.py +3 -3
  3. evalscope/benchmarks/benchmark.py +3 -2
  4. evalscope/benchmarks/ceval/ceval_adapter.py +2 -1
  5. evalscope/benchmarks/chinese_simple_qa/__init__.py +0 -0
  6. evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +168 -0
  7. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +2 -1
  8. evalscope/benchmarks/data_adapter.py +32 -4
  9. evalscope/benchmarks/general_qa/general_qa_adapter.py +5 -4
  10. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +20 -24
  11. evalscope/benchmarks/humaneval/humaneval_adapter.py +8 -5
  12. evalscope/benchmarks/live_code_bench/__init__.py +0 -0
  13. evalscope/benchmarks/live_code_bench/evaluate_utils.py +193 -0
  14. evalscope/benchmarks/live_code_bench/execute_utils.py +267 -0
  15. evalscope/benchmarks/live_code_bench/extract_utils.py +70 -0
  16. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +90 -0
  17. evalscope/benchmarks/live_code_bench/load_utils.py +71 -0
  18. evalscope/benchmarks/live_code_bench/pass_k_utils.py +56 -0
  19. evalscope/benchmarks/live_code_bench/prompts.py +207 -0
  20. evalscope/benchmarks/live_code_bench/testing_util.py +721 -0
  21. evalscope/benchmarks/mmlu/mmlu_adapter.py +3 -2
  22. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +148 -1
  23. evalscope/benchmarks/super_gpqa/utils.py +0 -5
  24. evalscope/collections/evaluator.py +3 -3
  25. evalscope/config.py +6 -1
  26. evalscope/constants.py +7 -0
  27. evalscope/evaluator/evaluator.py +51 -13
  28. evalscope/metrics/llm_judge.py +104 -0
  29. evalscope/perf/benchmark.py +5 -0
  30. evalscope/perf/http_client.py +9 -1
  31. evalscope/perf/main.py +1 -0
  32. evalscope/run.py +1 -1
  33. evalscope/third_party/longbench_write/infer.py +1 -1
  34. evalscope/version.py +2 -2
  35. {evalscope-0.12.1.dist-info → evalscope-0.13.0.dist-info}/METADATA +25 -10
  36. {evalscope-0.12.1.dist-info → evalscope-0.13.0.dist-info}/RECORD +43 -30
  37. tests/cli/test_all.py +144 -0
  38. tests/cli/test_collection.py +27 -1
  39. tests/cli/test_run.py +72 -10
  40. {evalscope-0.12.1.dist-info → evalscope-0.13.0.dist-info}/LICENSE +0 -0
  41. {evalscope-0.12.1.dist-info → evalscope-0.13.0.dist-info}/WHEEL +0 -0
  42. {evalscope-0.12.1.dist-info → evalscope-0.13.0.dist-info}/entry_points.txt +0 -0
  43. {evalscope-0.12.1.dist-info → evalscope-0.13.0.dist-info}/top_level.txt +0 -0
evalscope/benchmarks/mmlu/mmlu_adapter.py CHANGED
@@ -144,7 +144,8 @@ SUBJECT_MAPPING = {
      few_shot_num=5,
      train_split='train',
      eval_split='test',
-     prompt_template='The following are multiple choice questions (with answers) about {subset_name}. \n{query}',
+     prompt_template=
+     'Answer the following multiple choice question about {subset_name}. There is only one correct answer. The last line of your response should be in the format "Answer: LETTER" (without quotes), where LETTER is one of A, B, C, D. \n{query}',
  )

  class MMLUAdapter(DataAdapter):

@@ -248,7 +249,7 @@ class MMLUAdapter(DataAdapter):
          if self.model_adapter == OutputType.MULTIPLE_CHOICE:
              return result
          else:
-             return ResponseParser.parse_first_option(result, self.choices)
+             return ResponseParser.parse_first_option(result)

      def match(self, gold: str, pred: str) -> float:
          return exact_match(gold=gold, pred=pred)
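The new MMLU prompt instructs the model to end its reply with a line of the form "Answer: LETTER", and predictions are now parsed with ResponseParser.parse_first_option(result) without passing the choice list. As a rough illustration of the kind of extraction this answer format enables (a hypothetical sketch, not EvalScope's actual parser):

    import re

    def extract_answer_letter(response: str):
        # Hypothetical helper: pull the trailing 'Answer: X' letter the new prompt asks for.
        match = re.search(r'Answer:\s*([ABCD])', response)
        return match.group(1) if match else None

    print(extract_answer_letter('The correct choice is the second one.\nAnswer: B'))  # -> 'B'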
evalscope/benchmarks/simple_qa/simple_qa_adapter.py CHANGED
@@ -1,16 +1,103 @@
+ import re
+ from collections import defaultdict
+ from typing import Any, List
+
  from evalscope.benchmarks import Benchmark, DataAdapter
+ from evalscope.metrics import Metric, mean, metric_registry
+ from evalscope.metrics.llm_judge import LLMJudge
  from evalscope.utils.logger import get_logger

  # flake8: noqa

  logger = get_logger()

+ GRADER_TEMPLATE = """
+ Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"].
+ First, I will give examples of each grade, and then you will grade a new example.
+
+
+ The following are examples of CORRECT predicted answers.
+ ```
+ Question: What are the names of Barack Obama's children?
+ Gold target: Malia Obama and Sasha Obama
+ Predicted answer 1: sasha and malia obama
+ Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check
+ Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001.
+ ```
+ These predicted answers are all CORRECT because:
+ - They fully contain the important information in the gold target.
+ - They do not contain any information that contradicts the gold target.
+ - Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
+ - Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions.
+
+
+ The following are examples of INCORRECT predicted answers.
+ ```
+ Question: What are the names of Barack Obama's children?
+ Gold target: Malia and Sasha
+ Predicted answer 1: Malia.
+ Predicted answer 2: Malia, Sasha, and Susan.
+ Predicted answer 3: Barack Obama does not have any children.
+ Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia.
+ Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children.
+ Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer?
+ Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information.
+ ```
+ These predicted answers are all INCORRECT because:
+ - A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect.
+
+
+ The following are examples of NOT_ATTEMPTED predicted answers.
+ ```
+ Question: What are the names of Barack Obama's children?
+ Gold target: Malia and Sasha
+ Predicted answer 1: I don't know.
+ Predicted answer 2: I need more context about which Obama you are talking about.
+ Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children.
+ Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one.
+ ```
+ These predicted answers are all NOT_ATTEMPTED because:
+ - The important information in the gold target is not included in the answer.
+ - No statements in the answer contradict the gold target.
+
+
+ Also note the following things:
+ - For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k".
+ - Predicted answers "120k", "124k", and 115k" are all CORRECT.
+ - Predicted answers "100k" and "113k" are INCORRECT.
+ - Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target.
+ - The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question.
+ - For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer.
+ - Do not punish predicted answers if they omit information that would be clearly inferred from the question.
+ - For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California".
+ - Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question.
+ - For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question.
+ - For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed.
+ - Do not punish for typos in people's name if it's clearly the same name.
+ - For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung".
+
+
+ Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+ ```
+ Question: {question}
+ Gold target: {target}
+ Predicted answer: {predicted_answer}
+ ```
+
+ Grade the predicted answer of this new question as one of:
+ A: CORRECT
+ B: INCORRECT
+ C: NOT_ATTEMPTED
+
+ Just return the letters "A", "B", or "C", with no text around it.
+ """.strip()  # noqa: E501
+

  @Benchmark.register(
      name='simple_qa',
      pretty_name='SimpleQA',
      dataset_id='AI-ModelScope/SimpleQA',
-     metric_list=['AverageAccuracy'],
+     metric_list=['is_correct', 'is_incorrect', 'is_not_attempted'],
      few_shot_num=0,
      train_split=None,
      eval_split='test')
@@ -18,3 +105,63 @@ class SimpleQAAdapter(DataAdapter):

      def __init__(self, *args, **kwargs):
          super().__init__(*args, **kwargs)
+
+         # register metrics
+         metric_registry.register(Metric(name='is_correct', object=mean))
+         metric_registry.register(Metric(name='is_incorrect', object=mean))
+         metric_registry.register(Metric(name='is_not_attempted', object=mean))
+
+         # whether to use LLM as a judge
+         self.llm_as_a_judge = True
+
+     def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs) -> dict:
+         question = input_d['problem']
+         return self.gen_prompt_data(question)
+
+     def get_gold_answer(self, input_d: dict) -> str:
+         return input_d['answer']
+
+     def parse_pred_result(self, result: str, raw_input_d: dict = None, **kwargs) -> str:
+         return result.strip()
+
+     def match(self, gold: str, pred: str) -> float:
+         # simple match
+         logger.warning(f'Please use LLMJudge to match the result for SimpleQA')
+         is_correct = 1 if gold.lower().strip() == pred.lower().strip() else 0
+         is_incorrect = not is_correct
+         is_not_attempted = 0
+         return {
+             'is_correct': is_correct,
+             'is_incorrect': is_incorrect,
+             'is_not_attempted': is_not_attempted,
+         }
+
+     def llm_match(self, gold: Any, pred: Any, judge: LLMJudge, **kwargs) -> dict:
+         raw_input = kwargs.get('raw_input', None)
+         question = raw_input['problem']
+         # get grading response
+         prompt = GRADER_TEMPLATE.format(question=question, target=gold, predicted_answer=pred)
+         grading_response = judge(prompt)
+         # parse grading response
+         match = re.search(r'(A|B|C)', grading_response)
+         res = match.group(0) if match else 'C'
+         return {
+             'is_correct': 1 if res == 'A' else 0,
+             'is_incorrect': 1 if res == 'B' else 0,
+             'is_not_attempted': 1 if res == 'C' else 0,
+         }
+
+     def compute_metric(self, review_res_list: List[dict], **kwargs) -> List[dict]:
+         """
+         compute weighted mean of the bleu score of all samples
+
+         Args:
+             review_res_list: [{'is_correct': 1, 'is_incorrect': 0, 'is_not_attempted': 0}, ...]
+         """
+         # zip dict answers
+         res_dict = defaultdict(list)
+         for res in review_res_list:
+             for key, value in res.items():
+                 res_dict[key].append(value)
+
+         return super().compute_metric(res_dict, **kwargs)
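Taken together, the adapter formats GRADER_TEMPLATE for an LLM judge, maps the returned letter onto three binary metrics, and averages each metric across samples. A self-contained sketch of that flow with the judge call stubbed out (illustrative only; EvalScope routes the real call through LLMJudge and its metric registry):

    import re
    from collections import defaultdict
    from statistics import mean

    def grade_to_metrics(grading_response: str) -> dict:
        # Mirrors llm_match above: 'A' -> correct, 'B' -> incorrect, otherwise not attempted.
        match = re.search(r'(A|B|C)', grading_response)
        res = match.group(0) if match else 'C'
        return {'is_correct': res == 'A', 'is_incorrect': res == 'B', 'is_not_attempted': res == 'C'}

    # Aggregate per-sample grades the same way compute_metric zips them before averaging.
    reviews = [grade_to_metrics(r) for r in ['A', 'B', 'A', 'C']]  # toy grader outputs
    zipped = defaultdict(list)
    for review in reviews:
        for key, value in review.items():
            zipped[key].append(value)
    print({key: mean(values) for key, values in zipped.items()})
    # -> {'is_correct': 0.5, 'is_incorrect': 0.25, 'is_not_attempted': 0.25}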
evalscope/benchmarks/super_gpqa/utils.py CHANGED
@@ -1,15 +1,10 @@
  # flake8: noqa
  import re
- import timeout_decorator


- @timeout_decorator.timeout(5) # 5 seconds timeout
  def safe_regex_search(pattern, text, flags=0):
      try:
          return re.search(pattern, text, flags)
-     except timeout_decorator.TimeoutError:
-         print(f'Regex match timeout: pattern={pattern}, text={text[:100]}...')
-         return None
      except Exception as e:
          print(f'Regex match error: {str(e)}')
          return None
evalscope/collections/evaluator.py CHANGED
@@ -12,7 +12,7 @@ from evalscope.collections.sampler import DatasetEntry
  from evalscope.config import TaskConfig
  from evalscope.constants import AnswerKeys, DumpMode, EvalType
  from evalscope.evaluator import Evaluator
- from evalscope.models import get_local_model, initialize_model_adapter
+ from evalscope.models import initialize_model_adapter
  from evalscope.report import ReportGenerator
  from evalscope.utils.io_utils import OutputsStructure, dump_jsonl_data, jsonl_to_list
  from evalscope.utils.logger import get_logger
@@ -53,11 +53,11 @@ class SimpleEvaluator(Evaluator):

  class EvaluatorCollection:

-     def __init__(self, task_cfg: TaskConfig, data_adapter: DataAdapter, outputs: OutputsStructure):
+     def __init__(self, task_cfg: TaskConfig, data_adapter: DataAdapter, outputs: OutputsStructure, base_model):
          self.task_cfg = task_cfg
          self.data_adapter = data_adapter
          self.outputs = outputs
-         self.model = get_local_model(task_cfg)
+         self.model = base_model

          self.dataset, self.dataset_name = self.load()
          self.dataset_name_map = EvaluatorCollection._init_name_map(self.dataset)
evalscope/config.py CHANGED
@@ -9,7 +9,7 @@ from dataclasses import dataclass, field
  from typing import Dict, List, Optional, Union

  from evalscope.constants import (DEFAULT_DATASET_CACHE_DIR, DEFAULT_WORK_DIR, EvalBackend, EvalStage, EvalType, HubType,
-                                  OutputType)
+                                  JudgeStrategy, OutputType)
  from evalscope.models.custom import CustomModel
  from evalscope.utils import gen_hash
  from evalscope.utils.io_utils import dict_to_yaml, json_to_dict, yaml_to_dict
@@ -73,6 +73,11 @@ class TaskConfig:
      timeout: Optional[float] = None  # Only used for server model
      stream: bool = False  # Only used for server model

+     # LLMJudge arguments
+     judge_strategy: str = JudgeStrategy.AUTO
+     judge_worker_num: int = 8
+     judge_model_args: Optional[Dict] = field(default_factory=lambda: {})
+
      def __post_init__(self):
          if (not self.model_id) and self.model:
              if isinstance(self.model, CustomModel):
evalscope/constants.py CHANGED
@@ -156,3 +156,10 @@ class EvalBackend:

  class DataCollection:
      NAME = 'data_collection'
+
+
+ class JudgeStrategy:
+     AUTO = 'auto'
+     RULE = 'rule'
+     LLM = 'llm'
+     LLM_RECALL = 'llm_recall'
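Together with the JudgeStrategy constants above, the new TaskConfig fields let a task opt into LLM judging. A minimal configuration sketch; every concrete value below (model names, endpoint, key) is a placeholder, not a shipped default:

    from evalscope.config import TaskConfig
    from evalscope.constants import JudgeStrategy

    task_cfg = TaskConfig(
        model='Qwen/Qwen2.5-7B-Instruct',      # placeholder model under evaluation
        datasets=['simple_qa'],
        judge_strategy=JudgeStrategy.AUTO,     # use the LLM judge only where the adapter requests it
        judge_worker_num=8,                    # size of the review thread pool (see evaluator.py below)
        judge_model_args={                     # forwarded as LLMJudge(**judge_model_args)
            'model_id': 'qwen2.5-72b-instruct',        # placeholder judge model
            'api_url': 'http://127.0.0.1:8801/v1',     # placeholder OpenAI-compatible endpoint
            'api_key': 'EMPTY',
        },
    )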
evalscope/evaluator/evaluator.py CHANGED
@@ -11,7 +11,7 @@ from typing import Any, Dict, List, Optional, Union

  from evalscope.benchmarks import DataAdapter
  from evalscope.config import TaskConfig
- from evalscope.constants import AnswerKeys, DumpMode, EvalStage, EvalType, ReviewKeys
+ from evalscope.constants import AnswerKeys, DumpMode, EvalStage, EvalType, JudgeStrategy, ReviewKeys
  from evalscope.models import BaseModelAdapter
  from evalscope.report import Report, gen_table
  from evalscope.utils import dict_torch_dtype_to_str, gen_hash
@@ -58,9 +58,17 @@ class Evaluator(object):
          self.task_cfg = task_cfg
          # Deal with the output paths
          self.outputs_structure = outputs
-
          self.kwargs = kwargs

+         self._init_judge()
+
+     def _init_judge(self):
+         if self.task_cfg.judge_strategy == JudgeStrategy.RULE:
+             self.judge = None
+         else:
+             from evalscope.metrics.llm_judge import LLMJudge
+             self.judge = LLMJudge(**self.task_cfg.judge_model_args)
+
      def load_dataset(self):
          dataset = self.data_adapter.load(
              work_dir=os.path.expanduser(self.task_cfg.dataset_dir), datasets_hub=self.dataset_hub, **self.kwargs)
@@ -200,17 +208,40 @@ class Evaluator(object):
          for choice in choices:
              raw_input_d: dict = review_res[AnswerKeys.RAW_INPUT]
              answer_content = choice[ReviewKeys.MESSAGE][ReviewKeys.CONTENT]
-             answer_content = self.data_adapter.parse_pred_result(
-                 result=answer_content, raw_input_d=raw_input_d, eval_type=self.eval_type)
              gold_content = self.data_adapter.get_gold_answer(raw_input_d)

-             review_result = self.data_adapter.match(gold_content, answer_content)
+             # Get review result based on judge strategy
+             use_llm = (
+                 self.task_cfg.judge_strategy == JudgeStrategy.LLM
+                 or (self.task_cfg.judge_strategy == JudgeStrategy.AUTO and self.data_adapter.llm_as_a_judge))
+
+             if use_llm:
+                 # Use LLM as judge
+                 assert self.judge is not None, f'Judge model is required for LLM judging {self.data_adapter.name}'
+                 review_result = self.data_adapter.llm_match(
+                     gold_content, answer_content, self.judge, raw_input=raw_input_d)
+                 pred = answer_content
+             else:
+                 # Use rule-based judging
+                 pred_content = self.data_adapter.parse_pred_result(
+                     result=answer_content, raw_input_d=raw_input_d, eval_type=self.eval_type)
+                 review_result = self.data_adapter.match(gold_content, pred_content)
+
+                 # For LLM_RECALL strategy, use LLM to re-judge if rule-based result is not good
+                 if (self.task_cfg.judge_strategy == JudgeStrategy.LLM_RECALL
+                         and isinstance(review_result, (bool, int, float)) and not bool(review_result)):
+                     assert self.judge is not None, f'Judge model is required for LLM_RECALL strategy {self.data_adapter.name}'  # noqa: E501
+                     review_result = self.data_adapter.llm_match(
+                         gold_content, answer_content, self.judge, raw_input=raw_input_d)
+                     pred = answer_content
+                 else:
+                     pred = pred_content
+
              choice[ReviewKeys.REVIEW] = {
                  ReviewKeys.GOLD: gold_content,
-                 ReviewKeys.PRED: answer_content,
+                 ReviewKeys.PRED: pred,
                  ReviewKeys.RESULT: review_result
              }
-
              rev_choices.append(choice)

          review_res[AnswerKeys.CHOICES] = rev_choices
@@ -252,16 +283,23 @@ class Evaluator(object):
          logger.warning(f'Ignore use_cache={self.use_cache}, updating the review file: {review_file_path} ...')
          os.remove(review_file_path)

-         for answer_d in tqdm(answers_list, total=len(answers_list), desc=f'Reviewing({subset_name}): '):
+         def process_single_review(answer_d):
              review_id, reviewer_spec = self._generate_review_id(answer_d)
              # Get review
              review_d = self._get_review(answer_d=answer_d, review_id=review_id, reviewer_spec=reviewer_spec)
-
              logger.debug(review_d)
-
-             reviews_list.append(review_d)
-             # Dump reviews
-             dump_jsonl_data(review_d, review_file_path, dump_mode=DumpMode.APPEND)
+             return review_d
+
+         with ThreadPoolExecutor(max_workers=self.task_cfg.judge_worker_num) as executor:
+             # Submit all tasks and get futures
+             futures = [executor.submit(process_single_review, answer_d) for answer_d in answers_list]
+
+             # Process completed futures with progress bar
+             for future in tqdm(as_completed(futures), total=len(futures), desc=f'Reviewing({subset_name}): '):
+                 review_d = future.result()
+                 reviews_list.append(review_d)
+                 # Dump reviews
+                 dump_jsonl_data(review_d, review_file_path, dump_mode=DumpMode.APPEND)

          return reviews_list
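For readers skimming the diff, the branching added to the review step can be condensed into the following simplified paraphrase (an illustration, not the actual implementation):

    from evalscope.constants import JudgeStrategy

    def review_one(task_cfg, adapter, judge, gold, answer, raw_input):
        # Condensed paraphrase of the judge-strategy logic added to Evaluator above.
        use_llm = (task_cfg.judge_strategy == JudgeStrategy.LLM
                   or (task_cfg.judge_strategy == JudgeStrategy.AUTO and adapter.llm_as_a_judge))
        if use_llm:
            return answer, adapter.llm_match(gold, answer, judge, raw_input=raw_input)

        pred = adapter.parse_pred_result(result=answer, raw_input_d=raw_input)
        result = adapter.match(gold, pred)
        if (task_cfg.judge_strategy == JudgeStrategy.LLM_RECALL
                and isinstance(result, (bool, int, float)) and not bool(result)):
            # The rule-based check failed, so fall back to the LLM judge.
            return answer, adapter.llm_match(gold, answer, judge, raw_input=raw_input)
        return pred, result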
evalscope/metrics/llm_judge.py ADDED
@@ -0,0 +1,104 @@
+ import os
+ import re
+ from typing import Any, Dict, List, Optional
+
+ from evalscope.utils.logger import get_logger
+
+ logger = get_logger()
+
+ DEFAULT_PROMPT_TEMPLATE = """Your job is to look at a question, a gold target, and a predicted answer, and return a letter "A" or "B" to indicate whether the predicted answer is correct or incorrect.
+
+ Question: {question}
+
+ Reference Answer: {gold}
+
+ Model Answer: {pred}
+
+ Evaluate the model's answer based on correctness compared to the reference answer.
+ Grade the predicted answer of this new question as one of:
+ A: CORRECT
+ B: INCORRECT
+
+ Just return the letters "A" or "B", with no text around it.
+ """  # noqa: E501
+
+
+ class LLMJudge:
+     """
+     A metric that uses LLM to judge the quality of model predictions by comparing them with reference answers.
+     """
+
+     def __init__(self,
+                  api_key: Optional[str] = None,
+                  api_url: Optional[str] = None,
+                  model_id: Optional[str] = None,
+                  system_prompt: Optional[str] = None,
+                  prompt_template: Optional[str] = None,
+                  generation_config: Optional[Dict[str, Any]] = None,
+                  **kwargs):
+         """
+         Initialize LLMJudge metric.
+
+         Args:
+             api_key (str, optional): API key for OpenAI or compatible service
+             api_base (str, optional): API base URL
+             model_id (str, optional): Model ID for LLM
+             system_prompt (str, optional): System prompt for the judge
+             prompt_template (str, optional): Prompt template for the judge
+             generation_config (dict, optional): Generation configuration for the judge
+         """
+         self.api_key = api_key or os.environ.get('OPENAI_API_KEY', 'EMPTY')
+         self.api_url = api_url or os.environ.get('OPENAI_API_BASE', 'https://api.openai.com/v1')
+         self.model_id = model_id or os.environ.get('LOCAL_LLM', 'gpt-3.5-turbo')
+         self.system_prompt = system_prompt or os.environ.get('JUDGE_SYSTEM_PROMPT', None)
+         self.prompt_template = prompt_template or os.environ.get('JUDGE_PROMPT_TEMPLATE', DEFAULT_PROMPT_TEMPLATE)
+         self.generation_config = generation_config
+
+         from evalscope.models.server_adapter import ServerModelAdapter
+
+         # Initialize ServerModelAdapter
+         self.server_adapter = ServerModelAdapter(api_url=self.api_url, model_id=self.model_id, api_key=self.api_key)
+
+     def __call__(self, prompt: str, system_prompt: Optional[str] = None) -> float:
+         """
+         Args:
+             prompt (str): The prompt to evaluate
+             system_prompt (str, optional): The system prompt to use for the evaluation
+         Returns:
+             float: The score of the evaluation
+         """
+         input_data = {'data': [prompt], 'system_prompt': system_prompt or self.system_prompt}
+
+         # Inference configuration
+         infer_cfg = {'temperature': 0.0, 'max_tokens': 1024}
+         if self.generation_config:
+             infer_cfg.update(self.generation_config)
+
+         try:
+             # Send request using ServerModelAdapter
+             response = self.server_adapter.process_single_input(input_data, infer_cfg)
+
+             # Extract content from response
+             llm_response = response.get('choices', [{}])[0].get('message', {}).get('content', '')
+             return llm_response
+         except Exception as e:
+             logger.error(f'Error during LLM evaluation: {e}')
+             return None
+
+     def build_prompt(self, pred: str, gold: str, question: Optional[str] = None):
+         if question is None:
+             question = 'Not provided'
+         return self.prompt_template.format(question=question, pred=pred, gold=gold)
+
+     def get_score(self, response: str) -> float:
+         if response is None:
+             return 0
+         match = re.search(r'(A|B)', response)
+         if match:
+             answer = match.group(0)
+             if answer == 'A':
+                 return 1
+             elif answer == 'B':
+                 return 0
+         else:
+             return 0
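A minimal usage sketch of the new LLMJudge; the endpoint and model id below are placeholders, and in practice the Evaluator builds the judge from TaskConfig.judge_model_args rather than by hand:

    from evalscope.metrics.llm_judge import LLMJudge

    judge = LLMJudge(
        api_url='http://127.0.0.1:8801/v1',    # placeholder OpenAI-compatible endpoint
        api_key='EMPTY',
        model_id='qwen2.5-72b-instruct',       # placeholder judge model
    )

    prompt = judge.build_prompt(
        pred='Paris is the capital of France.',
        gold='Paris',
        question='What is the capital of France?',
    )
    response = judge(prompt)          # raw grading text from the judge model, e.g. 'A'
    print(judge.get_score(response))  # 1 for 'A' (CORRECT), 0 otherwise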
evalscope/perf/benchmark.py CHANGED
@@ -209,9 +209,14 @@ async def benchmark(args: Arguments) -> None:
      loop = asyncio.get_running_loop()
      add_signal_handlers(loop)

+     # init queue
      request_queue = asyncio.Queue()
      benchmark_data_queue = asyncio.Queue()

+     # reset event
+     query_send_completed_event.clear()
+     data_process_completed_event.clear()
+
      async def create_send_request_tasks():
          tasks: List[asyncio.Task] = []
          for idx in range(args.parallel):
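The two events cleared above are presumably module-level asyncio.Event objects that outlive a single benchmark run in the same process; clearing them keeps a completion flag from a previous run from leaking into the next one. A generic illustration of the pattern (not EvalScope code):

    import asyncio

    done_event = asyncio.Event()      # module-level, so it survives across runs

    async def run_once():
        done_event.clear()            # reset state left over from a previous run
        # ... do the run's work here ...
        done_event.set()              # signal completion to any consumers

    asyncio.run(run_once())
    asyncio.run(run_once())           # without clear(), the flag from the first run would still be set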
evalscope/perf/http_client.py CHANGED
@@ -145,7 +145,15 @@ async def test_connection(args: Arguments) -> bool:
      client = AioHttpClient(args)
      async with client:
          if 'chat/completions' in args.url:
-             request = {'messages': [{'role': 'user', 'content': 'hello'}], 'model': args.model, 'max_tokens': 10}
+             request = {
+                 'messages': [{
+                     'role': 'user',
+                     'content': 'hello'
+                 }],
+                 'model': args.model,
+                 'max_tokens': 10,
+                 'stream': args.stream
+             }
          else:
              request = {'prompt': 'hello', 'model': args.model, 'max_tokens': 10}
          async for is_error, state_code, response_data in client.post(request):
evalscope/perf/main.py CHANGED
@@ -35,6 +35,7 @@ def run_perf_benchmark(args):
      loop = asyncio.get_event_loop()
      if platform.system() != 'Windows':
          add_signal_handlers(loop)
+
      loop.run_until_complete(benchmark(args))

evalscope/run.py CHANGED
@@ -132,7 +132,7 @@ def create_evaluator(task_cfg: TaskConfig, dataset_name: str, outputs: OutputsSt
          # EvaluatorCollection is a collection of evaluators
          from evalscope.collections import EvaluatorCollection
          data_adapter = benchmark.get_data_adapter(config=task_cfg.dataset_args.get(dataset_name, {}))
-         return EvaluatorCollection(task_cfg, data_adapter, outputs)
+         return EvaluatorCollection(task_cfg, data_adapter, outputs, base_model)

      # Initialize model adapter
      model_adapter = initialize_model_adapter(task_cfg, benchmark, base_model)
evalscope/third_party/longbench_write/infer.py CHANGED
@@ -8,7 +8,7 @@ import random
  import torch
  from typing import List

- from evalscope.models.api import OpenaiApi
+ from evalscope.third_party.longbench_write.tools.openai_api import OpenaiApi
  from evalscope.third_party.longbench_write.utils import count_words
  from evalscope.utils import get_logger
evalscope/version.py CHANGED
@@ -1,4 +1,4 @@
  # Copyright (c) Alibaba, Inc. and its affiliates.

- __version__ = '0.12.1'
- __release_datetime__ = '2025-03-10 21:00:00'
+ __version__ = '0.13.0'
+ __release_datetime__ = '2025-03-14 12:00:00'
{evalscope-0.12.1.dist-info → evalscope-0.13.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: evalscope
- Version: 0.12.1
+ Version: 0.13.0
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
  Home-page: https://github.com/modelscope/evalscope
  Author: ModelScope team
@@ -175,16 +175,29 @@ Requires-Dist: ms-vlmeval>=0.0.9; extra == "vlmeval"
  > ⭐ If you like this project, please click the "Star" button at the top right to support us. Your support is our motivation to keep going!

  ## 📋 Contents
- - [Introduction](#-introduction)
- - [News](#-news)
- - [Installation](#️-installation)
- - [Quick Start](#-quick-start)
+ - [📋 Contents](#-contents)
+ - [📝 Introduction](#-introduction)
+ - [☎ User Groups](#-user-groups)
+ - [🎉 News](#-news)
+ - [🛠️ Installation](#️-installation)
+ - [Method 1: Install Using pip](#method-1-install-using-pip)
+ - [Method 2: Install from Source](#method-2-install-from-source)
+ - [🚀 Quick Start](#-quick-start)
+ - [Method 1. Using Command Line](#method-1-using-command-line)
+ - [Method 2. Using Python Code](#method-2-using-python-code)
+ - [Basic Parameter](#basic-parameter)
+ - [Output Results](#output-results)
+ - [📈 Visualization of Evaluation Results](#-visualization-of-evaluation-results)
+ - [🌐 Evaluation of Specified Model API](#-evaluation-of-specified-model-api)
+ - [⚙️ Custom Parameter Evaluation](#️-custom-parameter-evaluation)
+ - [Parameter](#parameter)
  - [Evaluation Backend](#evaluation-backend)
- - [Custom Dataset Evaluation](#️-custom-dataset-evaluation)
- - [Model Serving Performance Evaluation](#-model-serving-performance-evaluation)
- - [Arena Mode](#-arena-mode)
- - [Contribution](#️-contribution)
- - [Roadmap](#-roadmap)
+ - [📈 Model Serving Performance Evaluation](#-model-serving-performance-evaluation)
+ - [🖊️ Custom Dataset Evaluation](#️-custom-dataset-evaluation)
+ - [🏟️ Arena Mode](#️-arena-mode)
+ - [👷‍♂️ Contribution](#️-contribution)
+ - [🔜 Roadmap](#-roadmap)
+ - [Star History](#star-history)


  ## 📝 Introduction
@@ -226,6 +239,8 @@ Please scan the QR code below to join our community groups:

  ## 🎉 News

+ - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark. You can use it by specifying `live_code_bench`.
+ - 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
  - 🔥 **[2025.03.07]** Added support for the [QwQ-32B](https://modelscope.cn/models/Qwen/QwQ-32B/summary) model, evaluate the model's reasoning ability and reasoning efficiency, refer to [📖 Best Practices for QwQ-32B Evaluation](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html) for more details.
  - 🔥 **[2025.03.04]** Added support for the [SuperGPQA](https://modelscope.cn/datasets/m-a-p/SuperGPQA/summary) dataset, which covers 13 categories, 72 first-level disciplines, and 285 second-level disciplines, totaling 26,529 questions. You can use it by specifying `super_gpqa`.
  - 🔥 **[2025.03.03]** Added support for evaluating the IQ and EQ of models. Refer to [📖 Best Practices for IQ and EQ Evaluation](https://evalscope.readthedocs.io/en/latest/best_practice/iquiz.html) to find out how smart your AI is!