evalscope 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/api/benchmark/adapters/default_data_adapter.py +6 -4
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
- evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
- evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
- evalscope/api/benchmark/benchmark.py +27 -2
- evalscope/api/benchmark/meta.py +3 -0
- evalscope/api/evaluator/evaluator.py +5 -0
- evalscope/api/evaluator/state.py +5 -0
- evalscope/api/messages/chat_message.py +6 -1
- evalscope/api/mixin/__init__.py +1 -0
- evalscope/api/mixin/llm_judge_mixin.py +2 -0
- evalscope/api/mixin/sandbox_mixin.py +204 -0
- evalscope/api/model/generate_config.py +0 -3
- evalscope/api/model/model.py +1 -1
- evalscope/api/tool/tool_info.py +1 -1
- evalscope/arguments.py +6 -0
- evalscope/benchmarks/ai2d/__init__.py +0 -0
- evalscope/benchmarks/ai2d/ai2d_adapter.py +53 -0
- evalscope/benchmarks/amc/__init__.py +0 -0
- evalscope/benchmarks/amc/amc_adapter.py +46 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
- evalscope/benchmarks/bfcl/bfcl_adapter.py +141 -2
- evalscope/benchmarks/bfcl/generation.py +7 -7
- evalscope/benchmarks/drop/drop_adapter.py +1 -1
- evalscope/benchmarks/healthbench/__init__.py +0 -0
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
- evalscope/config.py +24 -1
- evalscope/constants.py +3 -0
- evalscope/evaluator/evaluator.py +25 -7
- evalscope/metrics/metric.py +27 -2
- evalscope/models/model_apis.py +10 -8
- evalscope/models/utils/openai.py +1 -2
- evalscope/perf/arguments.py +2 -0
- evalscope/perf/plugin/api/base.py +2 -2
- evalscope/perf/plugin/api/default_api.py +7 -7
- evalscope/perf/plugin/api/openai_api.py +83 -19
- evalscope/perf/plugin/datasets/flickr8k.py +2 -2
- evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
- evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
- evalscope/perf/utils/benchmark_util.py +1 -2
- evalscope/report/combinator.py +0 -25
- evalscope/report/report.py +8 -4
- evalscope/run.py +1 -1
- evalscope/utils/function_utils.py +41 -0
- evalscope/utils/import_utils.py +63 -13
- evalscope/utils/io_utils.py +19 -11
- evalscope/utils/json_schema.py +23 -2
- evalscope/utils/logger.py +19 -0
- evalscope/utils/model_utils.py +1 -1
- evalscope/version.py +2 -2
- {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/METADATA +6 -10
- {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/RECORD +87 -59
- tests/benchmark/test_eval.py +51 -7
- tests/benchmark/test_sandbox.py +81 -0
- tests/benchmark/test_vlm.py +60 -3
- tests/perf/test_perf.py +40 -12
- {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/LICENSE +0 -0
- {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/WHEEL +0 -0
- {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/entry_points.txt +0 -0
- {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/top_level.txt +0 -0
--- a/evalscope/benchmarks/humaneval/humaneval_adapter.py
+++ b/evalscope/benchmarks/humaneval/humaneval_adapter.py
@@ -14,9 +14,6 @@ from evalscope.utils.logger import get_logger
 
 logger = get_logger()
 
-# Example:
-# {"task_id": "HumanEval/0", "prompt": "from typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n    \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\n    given threshold.\n    >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n    False\n    >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n    True\n    \"\"\"\n", "entry_point": "has_close_elements", "canonical_solution": "    for idx, elem in enumerate(numbers):\n        for idx2, elem2 in enumerate(numbers):\n            if idx != idx2:\n                distance = abs(elem - elem2)\n                if distance < threshold:\n                    return True\n\n    return False\n", "test": "\n\nMETADATA = {\n    'author': 'jt',\n    'dataset': 'test'\n}\n\n\ndef check(candidate):\n    assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.3) == True\n    assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.05) == False\n    assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.95) == True\n    assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.8) == False\n    assert candidate([1.0, 2.0, 3.0, 4.0, 5.0, 2.0], 0.1) == True\n    assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 1.0) == True\n    assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 0.5) == False\n\n"}  # noqa
-
 
 @register_benchmark(
     BenchmarkMeta(
@@ -31,10 +28,7 @@ logger = get_logger()
         eval_split='test',
         prompt_template=
         'Read the following function signature and docstring, and fully implement the function described. Your response should only contain the code for this function.\n{question}',
-        extra_params={
-            'num_workers': 4,
-            'timeout': 4
-        },
+        review_timeout=4,
     )
 )
 class HumanevalAdapter(DefaultDataAdapter):
@@ -42,27 +36,6 @@ class HumanevalAdapter(DefaultDataAdapter):
     HumanEval adapter using the new data processing framework.
     """
 
-    def __init__(self, **kwargs):
-        try:
-            from human_eval.data import stream_jsonl, write_jsonl
-            from human_eval.evaluation import check_correctness
-        except ImportError:
-            raise ImportError(
-                'Please install human_eval:'
-                'https://github.com/openai/human-eval/tree/master#installation , '
-                'Note that you need to enable the execution code in the human_eval/execution.py first.'
-            )
-        super().__init__(**kwargs)
-
-        extra_params = kwargs.get('extra_params', {})
-        self.k = [1]
-        self.num_workers = extra_params.get('num_workers', 4)
-        self.timeout = extra_params.get('timeout', 4)
-
-        self.read_problems_func = stream_jsonl
-        self.write_jsonl_func = write_jsonl
-        self.eval_func = check_correctness
-
     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
         """Convert a data record to a Sample object."""
         query = record['prompt']
@@ -94,18 +67,29 @@
     def match_score(
         self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
     ) -> Score:
+
         score = Score(
            extracted_prediction=filtered_prediction,
            prediction=original_prediction,
         )
-
-
-
-
-
+        problem = task_state.metadata
+        completion = filtered_prediction
+
+        if not self.use_sandbox:
+            from .utils import check_correctness
+
+            # Execute the code and check correctness
+            res = check_correctness(problem=problem, completion=completion, timeout=self.review_timeout)
+            passed = res['passed']
+        else:
+            check_program = (
+                problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
+            )
+            res = self.execute_code_in_sandbox(code=check_program, timeout=self.review_timeout, language='python')
+            passed = res.get('status') == 'success'
+        # Set score values
         score.value = {'pass': passed}
-        score.
-        score.metadata = {'task_id': task_state.metadata['task_id'], 'timeout': self.timeout, 'execution_result': res}
+        score.metadata = {'task_id': problem['task_id'], 'timeout': self.review_timeout, 'execution_result': res}
         score.main_score_name = 'pass'
 
         return score
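The rewritten HumanevalAdapter.match_score (above) branches on self.use_sandbox: the local path delegates to check_correctness from the new humaneval utils module, while the sandbox path concatenates prompt, completion, test suite, and a check(entry_point) call into one self-contained program and hands it to the sandbox runner. A minimal standalone sketch of that dual-path logic, assuming a HumanEval-style problem dict, with sandbox_exec standing in for the adapter's execute_code_in_sandbox (a hypothetical free-function form, not evalscope's actual API):

from typing import Any, Callable, Dict

from evalscope.benchmarks.humaneval.utils import check_correctness


def score_completion(problem: Dict[str, Any], completion: str, timeout: float,
                     use_sandbox: bool = False, sandbox_exec: Callable = None) -> bool:
    if not use_sandbox:
        # Local path: in-process execution harness (see the utils.py diff below).
        res = check_correctness(problem=problem, completion=completion, timeout=timeout)
        return res['passed']
    # Sandbox path: build a self-contained test program and run it remotely.
    check_program = (
        problem['prompt'] + completion + '\n' + problem['test'] + '\n'
        + f"check({problem['entry_point']})"
    )
    res = sandbox_exec(code=check_program, timeout=timeout, language='python')
    return res.get('status') == 'success'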
--- /dev/null
+++ b/evalscope/benchmarks/humaneval/utils.py
@@ -0,0 +1,235 @@
+import contextlib
+import faulthandler
+import io
+import multiprocessing
+import os
+import platform
+import signal
+import tempfile
+from typing import Dict, Optional
+
+
+def unsafe_execute(problem: Dict, completion: str, timeout: float, result):
+    with create_tempdir():
+
+        # These system calls are needed when cleaning up tempdir.
+        import os
+        import shutil
+
+        rmtree = shutil.rmtree
+        rmdir = os.rmdir
+        chdir = os.chdir
+
+        # Disable functionalities that can make destructive changes to the test.
+        reliability_guard()
+
+        # Construct the check program and run it.
+        check_program = (
+            problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
+        )
+
+        try:
+            exec_globals = {}
+            with swallow_io():
+                with time_limit(timeout):
+                    # WARNING
+                    # This program exists to execute untrusted model-generated code. Although
+                    # it is highly unlikely that model-generated code will do something overtly
+                    # malicious in response to this test suite, model-generated code may act
+                    # destructively due to a lack of model capability or alignment.
+                    # Users are strongly encouraged to sandbox this evaluation suite so that it
+                    # does not perform destructive actions on their host or network. For more
+                    # information on how OpenAI sandboxes its code, see the accompanying paper.
+                    # Once you have read this disclaimer and taken appropriate precautions,
+                    # uncomment the following line and proceed at your own risk:
+                    exec(check_program, exec_globals)
+            result.append('passed')
+        except TimeoutException:
+            result.append('timed out')
+        except BaseException as e:
+            result.append(f'failed: {e}')
+
+        # Needed for cleaning up.
+        shutil.rmtree = rmtree
+        os.rmdir = rmdir
+        os.chdir = chdir
+
+
+def check_correctness(problem: Dict, completion: str, timeout: float, completion_id: Optional[int] = None) -> Dict:
+    """
+    Evaluates the functional correctness of a completion by running the test
+    suite provided in the problem.
+
+    :param completion_id: an optional completion ID so we can match
+        the results later even if execution finishes asynchronously.
+    """
+
+    manager = multiprocessing.Manager()
+    result = manager.list()
+
+    p = multiprocessing.Process(target=unsafe_execute, args=(problem, completion, timeout, result))
+    p.start()
+    p.join(timeout=timeout + 1)
+    if p.is_alive():
+        p.kill()
+
+    if not result:
+        result.append('timed out')
+
+    return dict(
+        task_id=problem['task_id'],
+        passed=result[0] == 'passed',
+        result=result[0],
+        completion_id=completion_id,
+    )
+
+
+@contextlib.contextmanager
+def time_limit(seconds: float):
+
+    def signal_handler(signum, frame):
+        raise TimeoutException('Timed out!')
+
+    signal.setitimer(signal.ITIMER_REAL, seconds)
+    signal.signal(signal.SIGALRM, signal_handler)
+    try:
+        yield
+    finally:
+        signal.setitimer(signal.ITIMER_REAL, 0)
+
+
+@contextlib.contextmanager
+def swallow_io():
+    stream = WriteOnlyStringIO()
+    with contextlib.redirect_stdout(stream):
+        with contextlib.redirect_stderr(stream):
+            with redirect_stdin(stream):
+                yield
+
+
+@contextlib.contextmanager
+def create_tempdir():
+    with tempfile.TemporaryDirectory() as dirname:
+        with chdir(dirname):
+            yield dirname
+
+
+class TimeoutException(Exception):
+    pass
+
+
+class WriteOnlyStringIO(io.StringIO):
+    """StringIO that throws an exception when it's read from"""
+
+    def read(self, *args, **kwargs):
+        raise IOError
+
+    def readline(self, *args, **kwargs):
+        raise IOError
+
+    def readlines(self, *args, **kwargs):
+        raise IOError
+
+    def readable(self, *args, **kwargs):
+        """Returns True if the IO object can be read."""
+        return False
+
+
+class redirect_stdin(contextlib._RedirectStream):  # type: ignore
+    _stream = 'stdin'
+
+
+@contextlib.contextmanager
+def chdir(root):
+    if root == '.':
+        yield
+        return
+    cwd = os.getcwd()
+    os.chdir(root)
+    try:
+        yield
+    except BaseException as exc:
+        raise exc
+    finally:
+        os.chdir(cwd)
+
+
+def reliability_guard(maximum_memory_bytes: Optional[int] = None):
+    """
+    This disables various destructive functions and prevents the generated code
+    from interfering with the test (e.g. fork bomb, killing other processes,
+    removing filesystem files, etc.)
+
+    WARNING
+    This function is NOT a security sandbox. Untrusted code, including, model-
+    generated code, should not be blindly executed outside of one. See the
+    Codex paper for more information about OpenAI's code sandbox, and proceed
+    with caution.
+    """
+
+    if maximum_memory_bytes is not None:
+        import resource
+
+        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
+        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
+        if not platform.uname().system == 'Darwin':
+            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
+
+    faulthandler.disable()
+
+    import builtins
+
+    builtins.exit = None
+    builtins.quit = None
+
+    import os
+
+    os.environ['OMP_NUM_THREADS'] = '1'
+
+    os.kill = None
+    os.system = None
+    os.putenv = None
+    os.remove = None
+    os.removedirs = None
+    os.rmdir = None
+    os.fchdir = None
+    os.setuid = None
+    os.fork = None
+    os.forkpty = None
+    os.killpg = None
+    os.rename = None
+    os.renames = None
+    os.truncate = None
+    os.replace = None
+    os.unlink = None
+    os.fchmod = None
+    os.fchown = None
+    os.chmod = None
+    os.chown = None
+    os.chroot = None
+    os.fchdir = None
+    os.lchflags = None
+    os.lchmod = None
+    os.lchown = None
+    os.getcwd = None
+    os.chdir = None
+
+    import shutil
+
+    shutil.rmtree = None
+    shutil.move = None
+    shutil.chown = None
+
+    import subprocess
+
+    subprocess.Popen = None  # type: ignore
+
+    __builtins__['help'] = None
+
+    import sys
+
+    sys.modules['ipdb'] = None
+    sys.modules['joblib'] = None
+    sys.modules['resource'] = None
+    sys.modules['psutil'] = None
+    sys.modules['tkinter'] = None
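The new utils.py above vendors OpenAI's human-eval execution harness, which is what lets the adapter drop the human_eval dependency removed from __init__ in the previous file. A toy invocation, assuming a problem dict in the HumanEval record schema (the problem itself is hypothetical); the __main__ guard matters because check_correctness runs the check in a child process:

from evalscope.benchmarks.humaneval.utils import check_correctness

# Hypothetical minimal problem in the HumanEval schema (task_id, prompt, test, entry_point).
problem = {
    'task_id': 'demo/0',
    'prompt': 'def add(a, b):\n',
    'entry_point': 'add',
    'test': 'def check(candidate):\n    assert candidate(1, 2) == 3\n',
}
completion = '    return a + b\n'

if __name__ == '__main__':
    result = check_correctness(problem=problem, completion=completion, timeout=4.0)
    print(result['passed'])  # True: the assembled program runs and the assertion holds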
--- a/evalscope/benchmarks/live_code_bench/evaluate_utils.py
+++ b/evalscope/benchmarks/live_code_bench/evaluate_utils.py
@@ -9,6 +9,19 @@ from .pass_k_utils import compute_metrics_from_results
 logger = get_logger()
 
 
+def _temp_run(sample, generation, debug, result, metadata_list, timeout):
+    """Runs a test in a separate process to enforce a timeout.
+    This function is defined at the module's top level to ensure it can be
+    pickled by `multiprocessing.Process`. This is a requirement on platforms
+    like macOS (on Apple Silicon) which use the 'spawn' start method, as
+    nested functions are not picklable.
+    """
+    from .testing_util import run_test
+    res, metadata = run_test(sample, test=generation, debug=debug, timeout=timeout)
+    result.append(res)
+    metadata_list.append(metadata)
+
+
 def codegen_check_correctness(sample, generation, timeout, debug=True):
     """Check correctness of code generation with a global timeout.
 
@@ -16,12 +29,6 @@ def codegen_check_correctness(sample, generation, timeout, debug=True):
     timeouts inside `run_test`
     """
 
-    def _temp_run(sample, generation, debug, result, metadata_list, timeout):
-        from .testing_util import run_test
-        res, metadata = run_test(sample, test=generation, debug=debug, timeout=timeout)
-        result.append(res)
-        metadata_list.append(metadata)
-
     manager = multiprocessing.Manager()
     result = manager.list()
     metadata_list = manager.list()
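Moving _temp_run to module scope is the substantive fix in this file: under the 'spawn' start method the child process re-imports the module and resolves the Process target by its qualified name, so a function nested inside codegen_check_correctness cannot be pickled. A minimal reproduction, independent of evalscope:

import multiprocessing as mp


def top_level(q):
    q.put('ok')  # picklable: resolvable as <module>.top_level in the child


def main():

    def nested(q):
        q.put('ok')  # not picklable: no importable qualified name

    ctx = mp.get_context('spawn')
    q = ctx.Queue()
    p = ctx.Process(target=top_level, args=(q,))
    p.start()
    print(q.get())  # 'ok'
    p.join()
    try:
        ctx.Process(target=nested, args=(q,)).start()
    except Exception as e:
        # e.g. AttributeError: Can't pickle local object 'main.<locals>.nested'
        print(type(e).__name__)


if __name__ == '__main__':
    main()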
--- a/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py
+++ b/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py
@@ -7,7 +7,7 @@ from evalscope.api.messages.chat_message import ChatMessageUser
 from evalscope.api.metric import Score
 from evalscope.api.registry import register_benchmark
 from evalscope.constants import Tags
-from evalscope.utils.io_utils import
+from evalscope.utils.io_utils import convert_normal_types
 from evalscope.utils.logger import get_logger
 
 logger = get_logger()
@@ -26,10 +26,10 @@ logger = get_logger()
         eval_split='test',
         prompt_template=
         '### Question:\n{question_content}\n\n{format_prompt} ### Answer: (use the provided format with backticks)\n\n',
+        review_timeout=6,
         extra_params={
             'start_date': None,
             'end_date': None,
-            'timeout': 6,
             'debug': False
         },
     )
@@ -42,7 +42,6 @@ class LiveCodeBenchAdapter(DefaultDataAdapter):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
-        self.timeout = self.extra_params.get('timeout', 6)
         self.debug = self.extra_params.get('debug', False)
         self.start_date = self.extra_params.get('start_date')
         self.end_date = self.extra_params.get('end_date')
@@ -81,45 +80,69 @@
     def match_score(
         self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
     ) -> Score:
-        from .evaluate_utils import codegen_metrics
-
         score = Score(
             extracted_prediction=filtered_prediction,
             prediction=original_prediction,
         )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if not self.use_sandbox:
+            # Use original evaluation method
+            from .evaluate_utils import codegen_metrics
+
+            references = [{'input_output': task_state.metadata['evaluation_sample']}]
+            predictions = [[filtered_prediction]]
+
+            try:
+                metrics, eval_results, final_metadata = codegen_metrics(
+                    references,
+                    predictions,
+                    k_list=[1],
+                    num_process_evaluate=1,
+                    timeout=self.review_timeout,
+                    debug=self.debug,
+                )
+                pass_rate = metrics['pass@1'] / 100  # convert to point scale
+
+                score.value = {'pass': float(pass_rate > 0)}
+                score.explanation = f"Pass@1: {metrics['pass@1']}%"
+
+                # Convert numpy types to native Python types for JSON serialization
+                serializable_eval_results = convert_normal_types(eval_results)
+                serializable_final_metadata = convert_normal_types(final_metadata)
+
+                score.metadata = {
+                    'pass_rate': float(pass_rate),
+                    'timeout': self.review_timeout,
+                    'debug': self.debug,
+                    'eval_results': serializable_eval_results,
+                    'final_metadata': serializable_final_metadata
+                }
+            except Exception as e:
+                score.value = {'pass': False}
+                score.explanation = f'Evaluation failed: {str(e)}'
+                score.metadata = {'error': str(e)}
+        else:
+            # Use sandbox execution
+            try:
+                from .sandbox_evaluate_utils import evaluate_in_sandbox
+
+                evaluation_sample = task_state.metadata['evaluation_sample']
+                passed, detailed_results = evaluate_in_sandbox(
+                    self, filtered_prediction, evaluation_sample, timeout=self.review_timeout, debug=self.debug
+                )
+
+                score.value = {'pass': passed}
+                score.explanation = f"Sandbox execution: {'Passed' if passed else 'Failed'}"
+                score.metadata = {
+                    'timeout': self.review_timeout,
+                    'debug': self.debug,
+                    'execution_method': 'sandbox',
+                    'detailed_results': detailed_results
+                }
+            except Exception as e:
+                score.value = {'pass': False}
+                score.explanation = f'Sandbox evaluation failed: {str(e)}'
+                score.metadata = {'error': str(e), 'execution_method': 'sandbox'}
 
         score.main_score_name = 'pass'
         return score
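On the non-sandbox path above, codegen_metrics reports pass@1 on a 0-100 percentage scale, and with a single prediction per sample any non-zero rate is folded into a binary pass. A small worked check of that mapping, extracted from the branch above:

def to_pass_value(metrics: dict) -> dict:
    pass_rate = metrics['pass@1'] / 100  # percentage -> fraction, e.g. 100.0 -> 1.0
    return {'pass': float(pass_rate > 0)}  # single prediction: any pass counts


assert to_pass_value({'pass@1': 100.0}) == {'pass': 1.0}
assert to_pass_value({'pass@1': 0.0}) == {'pass': 0.0}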