evalscope 0.12.1__py3-none-any.whl → 0.13.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of evalscope has been flagged as potentially problematic by the registry.
- evalscope/arguments.py +6 -1
- evalscope/benchmarks/arc/arc_adapter.py +3 -3
- evalscope/benchmarks/benchmark.py +3 -2
- evalscope/benchmarks/ceval/ceval_adapter.py +2 -1
- evalscope/benchmarks/chinese_simple_qa/__init__.py +0 -0
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +168 -0
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +2 -1
- evalscope/benchmarks/data_adapter.py +32 -4
- evalscope/benchmarks/general_qa/general_qa_adapter.py +5 -4
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +20 -24
- evalscope/benchmarks/humaneval/humaneval_adapter.py +8 -5
- evalscope/benchmarks/live_code_bench/__init__.py +0 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +193 -0
- evalscope/benchmarks/live_code_bench/execute_utils.py +267 -0
- evalscope/benchmarks/live_code_bench/extract_utils.py +70 -0
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +90 -0
- evalscope/benchmarks/live_code_bench/load_utils.py +71 -0
- evalscope/benchmarks/live_code_bench/pass_k_utils.py +56 -0
- evalscope/benchmarks/live_code_bench/prompts.py +207 -0
- evalscope/benchmarks/live_code_bench/testing_util.py +721 -0
- evalscope/benchmarks/mmlu/mmlu_adapter.py +3 -2
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +148 -1
- evalscope/benchmarks/super_gpqa/utils.py +0 -5
- evalscope/collections/evaluator.py +4 -4
- evalscope/config.py +11 -3
- evalscope/constants.py +8 -0
- evalscope/evaluator/evaluator.py +56 -17
- evalscope/metrics/llm_judge.py +104 -0
- evalscope/models/custom_adapter.py +1 -1
- evalscope/perf/arguments.py +11 -40
- evalscope/perf/benchmark.py +39 -28
- evalscope/perf/http_client.py +9 -1
- evalscope/perf/main.py +2 -1
- evalscope/perf/plugin/datasets/__init__.py +1 -0
- evalscope/perf/plugin/datasets/openqa.py +6 -11
- evalscope/perf/plugin/datasets/random_dataset.py +51 -0
- evalscope/perf/utils/db_util.py +3 -0
- evalscope/run.py +15 -3
- evalscope/third_party/longbench_write/infer.py +1 -1
- evalscope/version.py +2 -2
- {evalscope-0.12.1.dist-info → evalscope-0.13.1.dist-info}/METADATA +56 -38
- {evalscope-0.12.1.dist-info → evalscope-0.13.1.dist-info}/RECORD +50 -36
- tests/cli/test_all.py +144 -0
- tests/cli/test_collection.py +27 -1
- tests/cli/test_run.py +103 -11
- tests/perf/test_perf.py +23 -0
- {evalscope-0.12.1.dist-info → evalscope-0.13.1.dist-info}/LICENSE +0 -0
- {evalscope-0.12.1.dist-info → evalscope-0.13.1.dist-info}/WHEEL +0 -0
- {evalscope-0.12.1.dist-info → evalscope-0.13.1.dist-info}/entry_points.txt +0 -0
- {evalscope-0.12.1.dist-info → evalscope-0.13.1.dist-info}/top_level.txt +0 -0
evalscope/benchmarks/mmlu/mmlu_adapter.py
CHANGED

@@ -144,7 +144,8 @@ SUBJECT_MAPPING = {
     few_shot_num=5,
     train_split='train',
     eval_split='test',
-    prompt_template=
+    prompt_template=
+    'Answer the following multiple choice question about {subset_name}. There is only one correct answer. The last line of your response should be in the format "Answer: LETTER" (without quotes), where LETTER is one of A, B, C, D. \n{query}',
 )
 class MMLUAdapter(DataAdapter):
 
@@ -248,7 +249,7 @@ class MMLUAdapter(DataAdapter):
         if self.model_adapter == OutputType.MULTIPLE_CHOICE:
             return result
         else:
-            return ResponseParser.parse_first_option(result
+            return ResponseParser.parse_first_option(result)
 
     def match(self, gold: str, pred: str) -> float:
         return exact_match(gold=gold, pred=pred)
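As a side note (not part of the diff): the new prompt template asks the model to finish with a line of the form "Answer: LETTER". A toy parser for that convention might look like the sketch below; it is an illustration only, not evalscope's ResponseParser.parse_first_option.

# Illustration only: pull the trailing "Answer: LETTER" that the new MMLU
# prompt template asks the model to emit. Not evalscope's actual parser.
import re

def parse_answer_letter(response: str, letters: str = 'ABCD'):
    matches = re.findall(rf'Answer:\s*([{letters}])', response)
    return matches[-1] if matches else None  # last occurrence, per the "last line" instruction

print(parse_answer_letter('Some reasoning...\nAnswer: C'))  # -> C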
evalscope/benchmarks/simple_qa/simple_qa_adapter.py
CHANGED

@@ -1,16 +1,103 @@
+import re
+from collections import defaultdict
+from typing import Any, List
+
 from evalscope.benchmarks import Benchmark, DataAdapter
+from evalscope.metrics import Metric, mean, metric_registry
+from evalscope.metrics.llm_judge import LLMJudge
 from evalscope.utils.logger import get_logger
 
 # flake8: noqa
 
 logger = get_logger()
 
+GRADER_TEMPLATE = """
+Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"].
+First, I will give examples of each grade, and then you will grade a new example.
+
+
+The following are examples of CORRECT predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia Obama and Sasha Obama
+Predicted answer 1: sasha and malia obama
+Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check
+Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001.
+```
+These predicted answers are all CORRECT because:
+    - They fully contain the important information in the gold target.
+    - They do not contain any information that contradicts the gold target.
+    - Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
+    - Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions.
+
+
+The following are examples of INCORRECT predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia and Sasha
+Predicted answer 1: Malia.
+Predicted answer 2: Malia, Sasha, and Susan.
+Predicted answer 3: Barack Obama does not have any children.
+Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia.
+Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children.
+Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer?
+Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information.
+```
+These predicted answers are all INCORRECT because:
+    - A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect.
+
+
+The following are examples of NOT_ATTEMPTED predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia and Sasha
+Predicted answer 1: I don't know.
+Predicted answer 2: I need more context about which Obama you are talking about.
+Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children.
+Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one.
+```
+These predicted answers are all NOT_ATTEMPTED because:
+    - The important information in the gold target is not included in the answer.
+    - No statements in the answer contradict the gold target.
+
+
+Also note the following things:
+- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k".
+    - Predicted answers "120k", "124k", and 115k" are all CORRECT.
+    - Predicted answers "100k" and "113k" are INCORRECT.
+    - Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target.
+- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question.
+    - For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer.
+- Do not punish predicted answers if they omit information that would be clearly inferred from the question.
+    - For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California".
+    - Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question.
+    - For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question.
+    - For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed.
+- Do not punish for typos in people's name if it's clearly the same name.
+    - For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung".
+
+
+Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+```
+Question: {question}
+Gold target: {target}
+Predicted answer: {predicted_answer}
+```
+
+Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+C: NOT_ATTEMPTED
+
+Just return the letters "A", "B", or "C", with no text around it.
+""".strip()  # noqa: E501
+
 
 @Benchmark.register(
     name='simple_qa',
     pretty_name='SimpleQA',
     dataset_id='AI-ModelScope/SimpleQA',
-    metric_list=['
+    metric_list=['is_correct', 'is_incorrect', 'is_not_attempted'],
     few_shot_num=0,
     train_split=None,
     eval_split='test')

@@ -18,3 +105,63 @@ class SimpleQAAdapter(DataAdapter):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
+
+        # register metrics
+        metric_registry.register(Metric(name='is_correct', object=mean))
+        metric_registry.register(Metric(name='is_incorrect', object=mean))
+        metric_registry.register(Metric(name='is_not_attempted', object=mean))
+
+        # whether to use LLM as a judge
+        self.llm_as_a_judge = True
+
+    def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs) -> dict:
+        question = input_d['problem']
+        return self.gen_prompt_data(question)
+
+    def get_gold_answer(self, input_d: dict) -> str:
+        return input_d['answer']
+
+    def parse_pred_result(self, result: str, raw_input_d: dict = None, **kwargs) -> str:
+        return result.strip()
+
+    def match(self, gold: str, pred: str) -> float:
+        # simple match
+        logger.warning(f'Please use LLMJudge to match the result for SimpleQA')
+        is_correct = 1 if gold.lower().strip() == pred.lower().strip() else 0
+        is_incorrect = not is_correct
+        is_not_attempted = 0
+        return {
+            'is_correct': is_correct,
+            'is_incorrect': is_incorrect,
+            'is_not_attempted': is_not_attempted,
+        }
+
+    def llm_match(self, gold: Any, pred: Any, judge: LLMJudge, **kwargs) -> dict:
+        raw_input = kwargs.get('raw_input', None)
+        question = raw_input['problem']
+        # get grading response
+        prompt = GRADER_TEMPLATE.format(question=question, target=gold, predicted_answer=pred)
+        grading_response = judge(prompt)
+        # parse grading response
+        match = re.search(r'(A|B|C)', grading_response)
+        res = match.group(0) if match else 'C'
+        return {
+            'is_correct': 1 if res == 'A' else 0,
+            'is_incorrect': 1 if res == 'B' else 0,
+            'is_not_attempted': 1 if res == 'C' else 0,
+        }
+
+    def compute_metric(self, review_res_list: List[dict], **kwargs) -> List[dict]:
+        """
+        compute weighted mean of the bleu score of all samples
+
+        Args:
+            review_res_list: [{'is_correct': 1, 'is_incorrect': 0, 'is_not_attempted': 0}, ...]
+        """
+        # zip dict answers
+        res_dict = defaultdict(list)
+        for res in review_res_list:
+            for key, value in res.items():
+                res_dict[key].append(value)
+
+        return super().compute_metric(res_dict, **kwargs)
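For orientation (not part of the diff), here is a minimal sketch of the grading flow the new adapter code implements: the formatted grader prompt goes to a judge callable, and the single-letter verdict is mapped onto the three registered metrics. The stub judge below is hypothetical; in evalscope the callable is an LLMJudge instance and the prompt is the full GRADER_TEMPLATE.

# Minimal sketch of the SimpleQA llm_match flow. The judge is any callable that
# returns the grader's one-letter verdict; here it is a hard-coded stub.
import re

def fake_judge(prompt: str) -> str:
    # A real judge would send `prompt` to an LLM; pretend it answered "A" (CORRECT).
    return 'A'

def grade(question: str, gold: str, pred: str, judge) -> dict:
    grader_prompt = f'Question: {question}\nGold target: {gold}\nPredicted answer: {pred}'  # stand-in for GRADER_TEMPLATE
    verdict = re.search(r'(A|B|C)', judge(grader_prompt))
    res = verdict.group(0) if verdict else 'C'  # default to NOT_ATTEMPTED, as in the adapter
    return {
        'is_correct': int(res == 'A'),
        'is_incorrect': int(res == 'B'),
        'is_not_attempted': int(res == 'C'),
    }

print(grade('Who wrote Hamlet?', 'William Shakespeare', 'Shakespeare', fake_judge))
# -> {'is_correct': 1, 'is_incorrect': 0, 'is_not_attempted': 0}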
evalscope/benchmarks/super_gpqa/utils.py
CHANGED

@@ -1,15 +1,10 @@
 # flake8: noqa
 import re
-import timeout_decorator
 
 
-@timeout_decorator.timeout(5)  # 5 seconds timeout
 def safe_regex_search(pattern, text, flags=0):
     try:
         return re.search(pattern, text, flags)
-    except timeout_decorator.TimeoutError:
-        print(f'Regex match timeout: pattern={pattern}, text={text[:100]}...')
-        return None
     except Exception as e:
         print(f'Regex match error: {str(e)}')
         return None
evalscope/collections/evaluator.py
CHANGED

@@ -12,7 +12,7 @@ from evalscope.collections.sampler import DatasetEntry
 from evalscope.config import TaskConfig
 from evalscope.constants import AnswerKeys, DumpMode, EvalType
 from evalscope.evaluator import Evaluator
-from evalscope.models import
+from evalscope.models import initialize_model_adapter
 from evalscope.report import ReportGenerator
 from evalscope.utils.io_utils import OutputsStructure, dump_jsonl_data, jsonl_to_list
 from evalscope.utils.logger import get_logger

@@ -53,11 +53,11 @@ class SimpleEvaluator(Evaluator):
 
 class EvaluatorCollection:
 
-    def __init__(self, task_cfg: TaskConfig, data_adapter: DataAdapter, outputs: OutputsStructure):
+    def __init__(self, task_cfg: TaskConfig, data_adapter: DataAdapter, outputs: OutputsStructure, base_model):
         self.task_cfg = task_cfg
         self.data_adapter = data_adapter
         self.outputs = outputs
-        self.model =
+        self.model = base_model
 
         self.dataset, self.dataset_name = self.load()
         self.dataset_name_map = EvaluatorCollection._init_name_map(self.dataset)

@@ -181,7 +181,7 @@ class EvaluatorCollection:
         answers_list = jsonl_to_list(pred_file_path)
         indices = set()
         for answer in answers_list:
-            index = answer
+            index = answer.get(AnswerKeys.INDEX)
             answer_dict[index] = answer
             indices.add(index)
         data = []
evalscope/config.py
CHANGED

@@ -9,7 +9,7 @@ from dataclasses import dataclass, field
 from typing import Dict, List, Optional, Union
 
 from evalscope.constants import (DEFAULT_DATASET_CACHE_DIR, DEFAULT_WORK_DIR, EvalBackend, EvalStage, EvalType, HubType,
-                                 OutputType)
+                                 JudgeStrategy, OutputType)
 from evalscope.models.custom import CustomModel
 from evalscope.utils import gen_hash
 from evalscope.utils.io_utils import dict_to_yaml, json_to_dict, yaml_to_dict

@@ -73,10 +73,15 @@ class TaskConfig:
     timeout: Optional[float] = None  # Only used for server model
     stream: bool = False  # Only used for server model
 
+    # LLMJudge arguments
+    judge_strategy: str = JudgeStrategy.AUTO
+    judge_worker_num: int = 8
+    judge_model_args: Optional[Dict] = field(default_factory=lambda: {})
+
     def __post_init__(self):
         if (not self.model_id) and self.model:
             if isinstance(self.model, CustomModel):
-                self.model_id =
+                self.model_id = self.model.config.get('model_id', 'custom_model')
             else:
                 self.model_id = os.path.basename(self.model).rstrip(os.sep)
                 # fix path error, see http://github.com/modelscope/evalscope/issues/377

@@ -87,7 +92,10 @@ class TaskConfig:
             self.eval_batch_size = 8 if self.eval_type == EvalType.SERVICE else 1
 
     def to_dict(self):
-
+        result = self.__dict__.copy()
+        if isinstance(self.model, CustomModel):
+            result['model'] = self.model.__class__.__name__
+        return result
 
     def __str__(self):
         return json.dumps(self.to_dict(), indent=4, default=str, ensure_ascii=False)
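A hedged configuration sketch exercising the new judge fields added to TaskConfig above. The judge_strategy/judge_worker_num/judge_model_args names come from this diff; model and datasets are standard TaskConfig fields, and all concrete values are placeholders.

# Sketch only: a TaskConfig using the new LLM-judge options from this release.
from evalscope.config import TaskConfig
from evalscope.constants import JudgeStrategy

task_cfg = TaskConfig(
    model='qwen2.5-7b-instruct',        # placeholder model id
    datasets=['simple_qa'],
    judge_strategy=JudgeStrategy.AUTO,  # 'auto' | 'rule' | 'llm' | 'llm_recall'
    judge_worker_num=8,                 # threads used when reviewing answers
    judge_model_args={                  # forwarded to LLMJudge(**judge_model_args)
        'model_id': 'gpt-4o-mini',      # placeholder judge model
        'api_url': 'https://api.openai.com/v1',
        'api_key': 'EMPTY',
    },
)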
evalscope/constants.py
CHANGED

@@ -77,6 +77,7 @@ class ArenaMode:
 
 
 class AnswerKeys:
+    INDEX = 'index'
     ANSWER_ID = 'answer_id'
     RAW_INPUT = 'raw_input'
     ORIGIN_PROMPT = 'origin_prompt'

@@ -156,3 +157,10 @@ class EvalBackend:
 
 class DataCollection:
     NAME = 'data_collection'
+
+
+class JudgeStrategy:
+    AUTO = 'auto'
+    RULE = 'rule'
+    LLM = 'llm'
+    LLM_RECALL = 'llm_recall'
evalscope/evaluator/evaluator.py
CHANGED

@@ -11,7 +11,7 @@ from typing import Any, Dict, List, Optional, Union
 
 from evalscope.benchmarks import DataAdapter
 from evalscope.config import TaskConfig
-from evalscope.constants import AnswerKeys, DumpMode, EvalStage, EvalType, ReviewKeys
+from evalscope.constants import AnswerKeys, DumpMode, EvalStage, EvalType, JudgeStrategy, ReviewKeys
 from evalscope.models import BaseModelAdapter
 from evalscope.report import Report, gen_table
 from evalscope.utils import dict_torch_dtype_to_str, gen_hash

@@ -58,9 +58,17 @@ class Evaluator(object):
         self.task_cfg = task_cfg
         # Deal with the output paths
         self.outputs_structure = outputs
-
         self.kwargs = kwargs
 
+        self._init_judge()
+
+    def _init_judge(self):
+        if self.task_cfg.judge_strategy == JudgeStrategy.RULE:
+            self.judge = None
+        else:
+            from evalscope.metrics.llm_judge import LLMJudge
+            self.judge = LLMJudge(**self.task_cfg.judge_model_args)
+
     def load_dataset(self):
         dataset = self.data_adapter.load(
             work_dir=os.path.expanduser(self.task_cfg.dataset_dir), datasets_hub=self.dataset_hub, **self.kwargs)

@@ -73,7 +81,7 @@ class Evaluator(object):
         for subset_name, prompts_list in prompts.items():
             limit = self.task_cfg.limit or len(prompts_list)
             for index, prompt in enumerate(prompts_list[:limit]):
-                prompt[
+                prompt[AnswerKeys.INDEX] = index
                 limited_prompts[subset_name].append(prompt)
 
         return limited_prompts

@@ -89,7 +97,8 @@ class Evaluator(object):
         answer_d[AnswerKeys.ANSWER_ID] = answer_id
         answer_d[AnswerKeys.SUBSET_NAME] = subset_name
         answer_d[AnswerKeys.RAW_INPUT] = input_d[AnswerKeys.RAW_INPUT]
-        answer_d[AnswerKeys.ORIGIN_PROMPT] = input_d
+        # answer_d[AnswerKeys.ORIGIN_PROMPT] = input_d
+        answer_d[AnswerKeys.INDEX] = input_d[AnswerKeys.INDEX]
         return answer_d
 
     def _get_answer(self, input_prompts, subset_name, infer_cfg) -> List[dict]:

@@ -109,7 +118,7 @@ class Evaluator(object):
             return answers_list, prompts_list
 
         def get_answered_indices(answers_list: List[Dict]) -> List[int]:
-            indices = [answer
+            indices = [answer.get(AnswerKeys.INDEX) for answer in answers_list]
 
             if all(index is None for index in indices):
                 return list(range(len(answers_list)))

@@ -200,17 +209,40 @@ class Evaluator(object):
         for choice in choices:
             raw_input_d: dict = review_res[AnswerKeys.RAW_INPUT]
             answer_content = choice[ReviewKeys.MESSAGE][ReviewKeys.CONTENT]
-            answer_content = self.data_adapter.parse_pred_result(
-                result=answer_content, raw_input_d=raw_input_d, eval_type=self.eval_type)
             gold_content = self.data_adapter.get_gold_answer(raw_input_d)
 
-
+            # Get review result based on judge strategy
+            use_llm = (
+                self.task_cfg.judge_strategy == JudgeStrategy.LLM
+                or (self.task_cfg.judge_strategy == JudgeStrategy.AUTO and self.data_adapter.llm_as_a_judge))
+
+            if use_llm:
+                # Use LLM as judge
+                assert self.judge is not None, f'Judge model is required for LLM judging {self.data_adapter.name}'
+                review_result = self.data_adapter.llm_match(
+                    gold_content, answer_content, self.judge, raw_input=raw_input_d)
+                pred = answer_content
+            else:
+                # Use rule-based judging
+                pred_content = self.data_adapter.parse_pred_result(
+                    result=answer_content, raw_input_d=raw_input_d, eval_type=self.eval_type)
+                review_result = self.data_adapter.match(gold_content, pred_content)
+
+                # For LLM_RECALL strategy, use LLM to re-judge if rule-based result is not good
+                if (self.task_cfg.judge_strategy == JudgeStrategy.LLM_RECALL
+                        and isinstance(review_result, (bool, int, float)) and not bool(review_result)):
+                    assert self.judge is not None, f'Judge model is required for LLM_RECALL strategy {self.data_adapter.name}'  # noqa: E501
+                    review_result = self.data_adapter.llm_match(
+                        gold_content, answer_content, self.judge, raw_input=raw_input_d)
+                    pred = answer_content
+                else:
+                    pred = pred_content
+
             choice[ReviewKeys.REVIEW] = {
-                ReviewKeys.GOLD: gold_content,
-                ReviewKeys.PRED:
+                ReviewKeys.GOLD: gold_content if gold_content != raw_input_d else '*Same as Input*',
+                ReviewKeys.PRED: pred,
                 ReviewKeys.RESULT: review_result
             }
-
             rev_choices.append(choice)
 
         review_res[AnswerKeys.CHOICES] = rev_choices

@@ -252,16 +284,23 @@ class Evaluator(object):
             logger.warning(f'Ignore use_cache={self.use_cache}, updating the review file: {review_file_path} ...')
             os.remove(review_file_path)
 
-
+        def process_single_review(answer_d):
             review_id, reviewer_spec = self._generate_review_id(answer_d)
             # Get review
             review_d = self._get_review(answer_d=answer_d, review_id=review_id, reviewer_spec=reviewer_spec)
-
             logger.debug(review_d)
-
-
-
-
+            return review_d
+
+        with ThreadPoolExecutor(max_workers=self.task_cfg.judge_worker_num) as executor:
+            # Submit all tasks and get futures
+            futures = [executor.submit(process_single_review, answer_d) for answer_d in answers_list]
+
+            # Process completed futures with progress bar
+            for future in tqdm(as_completed(futures), total=len(futures), desc=f'Reviewing({subset_name}): '):
+                review_d = future.result()
+                reviews_list.append(review_d)
+                # Dump reviews
+                dump_jsonl_data(review_d, review_file_path, dump_mode=DumpMode.APPEND)
 
         return reviews_list
 
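Condensed outside the diff context, the dispatch introduced above behaves roughly like the sketch below (illustration only, not the actual Evaluator code):

# Condensed sketch of the judge-strategy dispatch added in this release.
def choose_review(strategy, adapter, judge, gold, answer, raw_input, eval_type):
    use_llm = strategy == 'llm' or (strategy == 'auto' and adapter.llm_as_a_judge)
    if use_llm:
        # LLM judging compares the raw answer against the gold target
        return adapter.llm_match(gold, answer, judge, raw_input=raw_input), answer
    # Rule-based judging parses the prediction first, then matches
    pred = adapter.parse_pred_result(result=answer, raw_input_d=raw_input, eval_type=eval_type)
    result = adapter.match(gold, pred)
    # llm_recall: fall back to the LLM judge only when the rule-based check failed
    if strategy == 'llm_recall' and isinstance(result, (bool, int, float)) and not bool(result):
        return adapter.llm_match(gold, answer, judge, raw_input=raw_input), answer
    return result, pred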
evalscope/metrics/llm_judge.py
ADDED

@@ -0,0 +1,104 @@
+import os
+import re
+from typing import Any, Dict, List, Optional
+
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+DEFAULT_PROMPT_TEMPLATE = """Your job is to look at a question, a gold target, and a predicted answer, and return a letter "A" or "B" to indicate whether the predicted answer is correct or incorrect.
+
+Question: {question}
+
+Reference Answer: {gold}
+
+Model Answer: {pred}
+
+Evaluate the model's answer based on correctness compared to the reference answer.
+Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+
+Just return the letters "A" or "B", with no text around it.
+"""  # noqa: E501
+
+
+class LLMJudge:
+    """
+    A metric that uses LLM to judge the quality of model predictions by comparing them with reference answers.
+    """
+
+    def __init__(self,
+                 api_key: Optional[str] = None,
+                 api_url: Optional[str] = None,
+                 model_id: Optional[str] = None,
+                 system_prompt: Optional[str] = None,
+                 prompt_template: Optional[str] = None,
+                 generation_config: Optional[Dict[str, Any]] = None,
+                 **kwargs):
+        """
+        Initialize LLMJudge metric.
+
+        Args:
+            api_key (str, optional): API key for OpenAI or compatible service
+            api_base (str, optional): API base URL
+            model_id (str, optional): Model ID for LLM
+            system_prompt (str, optional): System prompt for the judge
+            prompt_template (str, optional): Prompt template for the judge
+            generation_config (dict, optional): Generation configuration for the judge
+        """
+        self.api_key = api_key or os.environ.get('OPENAI_API_KEY', 'EMPTY')
+        self.api_url = api_url or os.environ.get('OPENAI_API_BASE', 'https://api.openai.com/v1')
+        self.model_id = model_id or os.environ.get('LOCAL_LLM', 'gpt-3.5-turbo')
+        self.system_prompt = system_prompt or os.environ.get('JUDGE_SYSTEM_PROMPT', None)
+        self.prompt_template = prompt_template or os.environ.get('JUDGE_PROMPT_TEMPLATE', DEFAULT_PROMPT_TEMPLATE)
+        self.generation_config = generation_config
+
+        from evalscope.models.server_adapter import ServerModelAdapter
+
+        # Initialize ServerModelAdapter
+        self.server_adapter = ServerModelAdapter(api_url=self.api_url, model_id=self.model_id, api_key=self.api_key)
+
+    def __call__(self, prompt: str, system_prompt: Optional[str] = None) -> float:
+        """
+        Args:
+            prompt (str): The prompt to evaluate
+            system_prompt (str, optional): The system prompt to use for the evaluation
+        Returns:
+            float: The score of the evaluation
+        """
+        input_data = {'data': [prompt], 'system_prompt': system_prompt or self.system_prompt}
+
+        # Inference configuration
+        infer_cfg = {'temperature': 0.0, 'max_tokens': 1024}
+        if self.generation_config:
+            infer_cfg.update(self.generation_config)
+
+        try:
+            # Send request using ServerModelAdapter
+            response = self.server_adapter.process_single_input(input_data, infer_cfg)
+
+            # Extract content from response
+            llm_response = response.get('choices', [{}])[0].get('message', {}).get('content', '')
+            return llm_response
+        except Exception as e:
+            logger.error(f'Error during LLM evaluation: {e}')
+            return None
+
+    def build_prompt(self, pred: str, gold: str, question: Optional[str] = None):
+        if question is None:
+            question = 'Not provided'
+        return self.prompt_template.format(question=question, pred=pred, gold=gold)
+
+    def get_score(self, response: str) -> float:
+        if response is None:
+            return 0
+        match = re.search(r'(A|B)', response)
+        if match:
+            answer = match.group(0)
+            if answer == 'A':
+                return 1
+            elif answer == 'B':
+                return 0
+        else:
+            return 0
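A small usage sketch for the new LLMJudge class; the endpoint and model values are placeholders, and only methods added in this file are used:

# Sketch: driving the new LLMJudge directly. Endpoint/model values are placeholders.
from evalscope.metrics.llm_judge import LLMJudge

judge = LLMJudge(
    api_url='http://127.0.0.1:8000/v1',  # placeholder OpenAI-compatible endpoint
    api_key='EMPTY',
    model_id='qwen2.5-72b-instruct',     # placeholder judge model
)

prompt = judge.build_prompt(
    question='What is the capital of France?',
    gold='Paris',
    pred='The capital of France is Paris.',
)
response = judge(prompt)           # raw "A"/"B" completion, or None on error
score = judge.get_score(response)  # 1 for "A" (correct), 0 otherwise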
evalscope/models/custom_adapter.py
CHANGED

@@ -66,4 +66,4 @@ class CustomModelAdapter(BaseModelAdapter):
            else:
                raise TypeError(f'Unsupported inputs type: {type(input_prompt)}')
 
-        return self.custom_model.predict(prompts=in_prompts, **kwargs)
+        return self.custom_model.predict(prompts=in_prompts, origin_inputs=inputs, **kwargs)
evalscope/perf/arguments.py
CHANGED

@@ -24,6 +24,7 @@ class Arguments:
     connect_timeout: int = 600  # Connection timeout in seconds
     read_timeout: int = 600  # Read timeout in seconds
     api_key: Optional[str] = None
+    no_test_connection: bool = False  # Test the connection before starting the benchmark
 
     # Performance and parallelism
     number: Optional[int] = None  # Number of requests to be made

@@ -40,8 +41,9 @@ class Arguments:
     outputs_dir: str = DEFAULT_WORK_DIR
 
     # Prompt settings
-    max_prompt_length: int =
+    max_prompt_length: int = 131072  # Maximum length of the prompt
     min_prompt_length: int = 0  # Minimum length of the prompt
+    prefix_length: int = 0  # Length of the prefix, only for random dataset
     prompt: Optional[str] = None  # The prompt text
     query_template: Optional[str] = None  # Template for the query
 

@@ -65,44 +67,12 @@ class Arguments:
 
     @staticmethod
     def from_args(args):
-
-
-
-
-
-
-            connect_timeout=args.connect_timeout,
-            read_timeout=args.read_timeout,
-            number=args.number,
-            parallel=args.parallel,
-            rate=args.rate,
-            log_every_n_query=args.log_every_n_query,
-            headers=args.headers,
-            wandb_api_key=args.wandb_api_key,
-            name=args.name,
-            outputs_dir=args.outputs_dir,
-            debug=args.debug,
-            tokenizer_path=args.tokenizer_path,
-            api=args.api,
-            max_prompt_length=args.max_prompt_length,
-            min_prompt_length=args.min_prompt_length,
-            prompt=args.prompt,
-            query_template=args.query_template,
-            dataset=args.dataset,
-            dataset_path=args.dataset_path,
-            frequency_penalty=args.frequency_penalty,
-            logprobs=args.logprobs,
-            max_tokens=args.max_tokens,
-            min_tokens=args.min_tokens,
-            n_choices=args.n_choices,
-            seed=args.seed,
-            stop=args.stop,
-            stop_token_ids=args.stop_token_ids,
-            stream=args.stream,
-            temperature=args.temperature,
-            top_p=args.top_p,
-            top_k=args.top_k,
-        )
+        # Convert Namespace to a dictionary and filter out None values
+        args_dict = {k: v for k, v in vars(args).items() if v is not None}
+
+        if 'func' in args_dict:
+            del args_dict['func']  # Note: compat CLI arguments
+        return Arguments(**args_dict)
 
     def __post_init__(self):
         self.headers = self.headers or {}  # Default to empty dictionary

@@ -153,6 +123,7 @@ def add_argument(parser: argparse.ArgumentParser):
     parser.add_argument('--api-key', type=str, required=False, default=None, help='The API key for authentication')
     parser.add_argument('--connect-timeout', type=int, default=600, help='The network connection timeout')
     parser.add_argument('--read-timeout', type=int, default=600, help='The network read timeout')
+    parser.add_argument('--no-test-connection', action='store_false', default=False, help='Do not test the connection before starting the benchmark')  # noqa: E501
 
     # Performance and parallelism
     parser.add_argument('-n', '--number', type=int, default=None, help='How many requests to be made')

@@ -168,6 +139,7 @@ def add_argument(parser: argparse.ArgumentParser):
     # Prompt settings
     parser.add_argument('--max-prompt-length', type=int, default=sys.maxsize, help='Maximum input prompt length')
     parser.add_argument('--min-prompt-length', type=int, default=0, help='Minimum input prompt length')
+    parser.add_argument('--prefix-length', type=int, default=0, help='The prefix length')
     parser.add_argument('--prompt', type=str, required=False, default=None, help='Specified the request prompt')
     parser.add_argument('--query-template', type=str, default=None, help='Specify the query template')
 

@@ -193,7 +165,6 @@ def add_argument(parser: argparse.ArgumentParser):
     parser.add_argument('--temperature', type=float, help='The sample temperature', default=None)
     parser.add_argument('--top-p', type=float, help='Sampling top p', default=None)
     parser.add_argument('--top-k', type=int, help='Sampling top k', default=None)
-
     # yapf: enable
 
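For reference (not part of the diff), the simplified from_args above just forwards the parsed CLI namespace into the dataclass. A minimal sketch of that round trip follows; it assumes Arguments also carries the usual model/url perf options not shown in this hunk, and all values are placeholders.

# Sketch: building perf Arguments from a parsed-CLI-style namespace.
import argparse
from evalscope.perf.arguments import Arguments

# from_args drops None values, so anything left unset falls back to the
# dataclass defaults shown above (e.g. max_prompt_length=131072).
ns = argparse.Namespace(
    model='my-model',                                        # placeholder model name (assumed field)
    url='http://127.0.0.1:8000/v1/chat/completions',         # placeholder endpoint (assumed field)
    number=100,
    parallel=8,
    prefix_length=32,          # new in this release, used by the random dataset plugin
    no_test_connection=False,  # new flag in this release
    temperature=None,          # filtered out by from_args
)
perf_args = Arguments.from_args(ns)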