evalscope 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.

This version of evalscope might be problematic; more details are available on the package's registry page.

Files changed (155)
  1. evalscope/api/benchmark/adapters/default_data_adapter.py +18 -4
  2. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  3. evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
  4. evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
  5. evalscope/api/benchmark/benchmark.py +27 -2
  6. evalscope/api/benchmark/meta.py +3 -0
  7. evalscope/api/evaluator/evaluator.py +5 -0
  8. evalscope/api/evaluator/state.py +5 -0
  9. evalscope/api/messages/chat_message.py +6 -1
  10. evalscope/api/mixin/__init__.py +1 -0
  11. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  12. evalscope/api/mixin/sandbox_mixin.py +204 -0
  13. evalscope/api/model/generate_config.py +0 -3
  14. evalscope/api/model/model.py +1 -1
  15. evalscope/api/tool/tool_info.py +1 -1
  16. evalscope/app/ui/multi_model.py +6 -1
  17. evalscope/app/ui/single_model.py +8 -2
  18. evalscope/app/utils/data_utils.py +3 -2
  19. evalscope/app/utils/visualization.py +2 -2
  20. evalscope/arguments.py +6 -0
  21. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  22. evalscope/benchmarks/amc/__init__.py +0 -0
  23. evalscope/benchmarks/amc/amc_adapter.py +46 -0
  24. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  25. evalscope/benchmarks/bfcl/bfcl_adapter.py +106 -2
  26. evalscope/benchmarks/bfcl/generation.py +7 -7
  27. evalscope/benchmarks/blink/__init__.py +0 -0
  28. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  29. evalscope/benchmarks/chartqa/__init__.py +0 -0
  30. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  31. evalscope/benchmarks/chartqa/utils.py +38 -0
  32. evalscope/benchmarks/docvqa/__init__.py +0 -0
  33. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  34. evalscope/benchmarks/drop/drop_adapter.py +1 -1
  35. evalscope/benchmarks/general_arena/utils.py +2 -1
  36. evalscope/benchmarks/healthbench/__init__.py +0 -0
  37. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  38. evalscope/benchmarks/healthbench/utils.py +102 -0
  39. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  40. evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
  41. evalscope/benchmarks/humaneval/utils.py +235 -0
  42. evalscope/benchmarks/infovqa/__init__.py +0 -0
  43. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  44. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  45. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
  46. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  47. evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
  48. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  49. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
  50. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  51. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  52. evalscope/benchmarks/mm_star/__init__.py +0 -0
  53. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  54. evalscope/benchmarks/mmmu/mmmu_adapter.py +1 -1
  55. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
  56. evalscope/benchmarks/multi_if/__init__.py +0 -0
  57. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  58. evalscope/benchmarks/multi_if/metrics.py +120 -0
  59. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  60. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
  61. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  62. evalscope/benchmarks/ocr_bench/ocr_bench_adapter.py +101 -0
  63. evalscope/benchmarks/ocr_bench_v2/IoUscore_metric.py +87 -0
  64. evalscope/benchmarks/ocr_bench_v2/TEDS_metric.py +963 -0
  65. evalscope/benchmarks/ocr_bench_v2/__init__.py +0 -0
  66. evalscope/benchmarks/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  67. evalscope/benchmarks/ocr_bench_v2/page_ocr_metric.py +50 -0
  68. evalscope/benchmarks/ocr_bench_v2/parallel.py +46 -0
  69. evalscope/benchmarks/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  70. evalscope/benchmarks/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  71. evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  72. evalscope/benchmarks/ocr_bench_v2/spotting_eval/script.py +481 -0
  73. evalscope/benchmarks/ocr_bench_v2/spotting_metric.py +179 -0
  74. evalscope/benchmarks/ocr_bench_v2/utils.py +432 -0
  75. evalscope/benchmarks/ocr_bench_v2/vqa_metric.py +254 -0
  76. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  77. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  78. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  79. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  80. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  81. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  82. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  83. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
  84. evalscope/config.py +24 -1
  85. evalscope/constants.py +3 -0
  86. evalscope/evaluator/evaluator.py +25 -7
  87. evalscope/metrics/metric.py +78 -2
  88. evalscope/metrics/metrics.py +16 -0
  89. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  90. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  91. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  92. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  93. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  94. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  95. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  96. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  97. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  98. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  99. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  100. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  101. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  102. evalscope/models/model_apis.py +10 -8
  103. evalscope/models/utils/openai.py +1 -2
  104. evalscope/perf/arguments.py +2 -0
  105. evalscope/perf/plugin/api/base.py +2 -2
  106. evalscope/perf/plugin/api/default_api.py +7 -7
  107. evalscope/perf/plugin/api/openai_api.py +83 -19
  108. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  109. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  110. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  111. evalscope/perf/utils/benchmark_util.py +1 -2
  112. evalscope/report/__init__.py +9 -1
  113. evalscope/report/combinator.py +45 -20
  114. evalscope/report/report.py +8 -4
  115. evalscope/run.py +1 -1
  116. evalscope/utils/function_utils.py +41 -0
  117. evalscope/utils/import_utils.py +63 -13
  118. evalscope/utils/io_utils.py +19 -11
  119. evalscope/utils/json_schema.py +25 -2
  120. evalscope/utils/logger.py +19 -0
  121. evalscope/utils/model_utils.py +1 -1
  122. evalscope/utils/multi_choices.py +16 -1
  123. evalscope/version.py +2 -2
  124. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/METADATA +10 -40
  125. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/RECORD +120 -95
  126. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/top_level.txt +0 -1
  127. tests/__init__.py +0 -1
  128. tests/benchmark/__init__.py +0 -1
  129. tests/benchmark/test_eval.py +0 -385
  130. tests/benchmark/test_image_edit.py +0 -65
  131. tests/benchmark/test_t2i.py +0 -142
  132. tests/benchmark/test_vlm.py +0 -80
  133. tests/cli/__init__.py +0 -1
  134. tests/cli/test_all.py +0 -269
  135. tests/cli/test_collection.py +0 -99
  136. tests/cli/test_custom.py +0 -268
  137. tests/cli/test_reasoning.py +0 -81
  138. tests/common.py +0 -73
  139. tests/perf/__init__.py +0 -1
  140. tests/perf/test_perf.py +0 -178
  141. tests/rag/test_clip_benchmark.py +0 -87
  142. tests/rag/test_mteb.py +0 -213
  143. tests/rag/test_ragas.py +0 -128
  144. tests/swift/__init__.py +0 -1
  145. tests/swift/test_run_swift_eval.py +0 -146
  146. tests/swift/test_run_swift_vlm_eval.py +0 -128
  147. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  148. tests/test_run_all.py +0 -12
  149. tests/utils.py +0 -13
  150. tests/vlm/__init__.py +0 -1
  151. tests/vlm/test_vlmeval.py +0 -102
  152. {tests/rag → evalscope/benchmarks/ai2d}/__init__.py +0 -0
  153. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/LICENSE +0 -0
  154. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/WHEEL +0 -0
  155. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/entry_points.txt +0 -0
evalscope/benchmarks/humaneval/utils.py (new file)
@@ -0,0 +1,235 @@
+import contextlib
+import faulthandler
+import io
+import multiprocessing
+import os
+import platform
+import signal
+import tempfile
+from typing import Dict, Optional
+
+
+def unsafe_execute(problem: Dict, completion: str, timeout: float, result):
+    with create_tempdir():
+
+        # These system calls are needed when cleaning up tempdir.
+        import os
+        import shutil
+
+        rmtree = shutil.rmtree
+        rmdir = os.rmdir
+        chdir = os.chdir
+
+        # Disable functionalities that can make destructive changes to the test.
+        reliability_guard()
+
+        # Construct the check program and run it.
+        check_program = (
+            problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
+        )
+
+        try:
+            exec_globals = {}
+            with swallow_io():
+                with time_limit(timeout):
+                    # WARNING
+                    # This program exists to execute untrusted model-generated code. Although
+                    # it is highly unlikely that model-generated code will do something overtly
+                    # malicious in response to this test suite, model-generated code may act
+                    # destructively due to a lack of model capability or alignment.
+                    # Users are strongly encouraged to sandbox this evaluation suite so that it
+                    # does not perform destructive actions on their host or network. For more
+                    # information on how OpenAI sandboxes its code, see the accompanying paper.
+                    # Once you have read this disclaimer and taken appropriate precautions,
+                    # uncomment the following line and proceed at your own risk:
+                    exec(check_program, exec_globals)
+            result.append('passed')
+        except TimeoutException:
+            result.append('timed out')
+        except BaseException as e:
+            result.append(f'failed: {e}')
+
+        # Needed for cleaning up.
+        shutil.rmtree = rmtree
+        os.rmdir = rmdir
+        os.chdir = chdir
+
+
+def check_correctness(problem: Dict, completion: str, timeout: float, completion_id: Optional[int] = None) -> Dict:
+    """
+    Evaluates the functional correctness of a completion by running the test
+    suite provided in the problem.
+
+    :param completion_id: an optional completion ID so we can match
+        the results later even if execution finishes asynchronously.
+    """
+
+    manager = multiprocessing.Manager()
+    result = manager.list()
+
+    p = multiprocessing.Process(target=unsafe_execute, args=(problem, completion, timeout, result))
+    p.start()
+    p.join(timeout=timeout + 1)
+    if p.is_alive():
+        p.kill()
+
+    if not result:
+        result.append('timed out')
+
+    return dict(
+        task_id=problem['task_id'],
+        passed=result[0] == 'passed',
+        result=result[0],
+        completion_id=completion_id,
+    )
+
+
+@contextlib.contextmanager
+def time_limit(seconds: float):
+
+    def signal_handler(signum, frame):
+        raise TimeoutException('Timed out!')
+
+    signal.setitimer(signal.ITIMER_REAL, seconds)
+    signal.signal(signal.SIGALRM, signal_handler)
+    try:
+        yield
+    finally:
+        signal.setitimer(signal.ITIMER_REAL, 0)
+
+
+@contextlib.contextmanager
+def swallow_io():
+    stream = WriteOnlyStringIO()
+    with contextlib.redirect_stdout(stream):
+        with contextlib.redirect_stderr(stream):
+            with redirect_stdin(stream):
+                yield
+
+
+@contextlib.contextmanager
+def create_tempdir():
+    with tempfile.TemporaryDirectory() as dirname:
+        with chdir(dirname):
+            yield dirname
+
+
+class TimeoutException(Exception):
+    pass
+
+
+class WriteOnlyStringIO(io.StringIO):
+    """StringIO that throws an exception when it's read from"""
+
+    def read(self, *args, **kwargs):
+        raise IOError
+
+    def readline(self, *args, **kwargs):
+        raise IOError
+
+    def readlines(self, *args, **kwargs):
+        raise IOError
+
+    def readable(self, *args, **kwargs):
+        """Returns True if the IO object can be read."""
+        return False
+
+
+class redirect_stdin(contextlib._RedirectStream):  # type: ignore
+    _stream = 'stdin'
+
+
+@contextlib.contextmanager
+def chdir(root):
+    if root == '.':
+        yield
+        return
+    cwd = os.getcwd()
+    os.chdir(root)
+    try:
+        yield
+    except BaseException as exc:
+        raise exc
+    finally:
+        os.chdir(cwd)
+
+
+def reliability_guard(maximum_memory_bytes: Optional[int] = None):
+    """
+    This disables various destructive functions and prevents the generated code
+    from interfering with the test (e.g. fork bomb, killing other processes,
+    removing filesystem files, etc.)
+
+    WARNING
+    This function is NOT a security sandbox. Untrusted code, including, model-
+    generated code, should not be blindly executed outside of one. See the
+    Codex paper for more information about OpenAI's code sandbox, and proceed
+    with caution.
+    """
+
+    if maximum_memory_bytes is not None:
+        import resource
+
+        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
+        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
+        if not platform.uname().system == 'Darwin':
+            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
+
+    faulthandler.disable()
+
+    import builtins
+
+    builtins.exit = None
+    builtins.quit = None
+
+    import os
+
+    os.environ['OMP_NUM_THREADS'] = '1'
+
+    os.kill = None
+    os.system = None
+    os.putenv = None
+    os.remove = None
+    os.removedirs = None
+    os.rmdir = None
+    os.fchdir = None
+    os.setuid = None
+    os.fork = None
+    os.forkpty = None
+    os.killpg = None
+    os.rename = None
+    os.renames = None
+    os.truncate = None
+    os.replace = None
+    os.unlink = None
+    os.fchmod = None
+    os.fchown = None
+    os.chmod = None
+    os.chown = None
+    os.chroot = None
+    os.fchdir = None
+    os.lchflags = None
+    os.lchmod = None
+    os.lchown = None
+    os.getcwd = None
+    os.chdir = None
+
+    import shutil
+
+    shutil.rmtree = None
+    shutil.move = None
+    shutil.chown = None
+
+    import subprocess
+
+    subprocess.Popen = None  # type: ignore
+
+    __builtins__['help'] = None
+
+    import sys
+
+    sys.modules['ipdb'] = None
+    sys.modules['joblib'] = None
+    sys.modules['resource'] = None
+    sys.modules['psutil'] = None
+    sys.modules['tkinter'] = None
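The new `check_correctness` helper mirrors OpenAI's HumanEval execution harness. A minimal usage sketch, assuming the module is importable as packaged above; the toy `problem` dict is illustrative, only its field names match what the code reads:

# Illustrative only: a toy HumanEval-style problem driving the new helper.
from evalscope.benchmarks.humaneval.utils import check_correctness

problem = {
    'task_id': 'Toy/0',                                      # made-up task id
    'prompt': 'def add(a, b):\n',
    'entry_point': 'add',
    'test': 'def check(candidate):\n    assert candidate(1, 2) == 3\n',
}
completion = '    return a + b\n'

if __name__ == '__main__':  # required on spawn-based platforms
    result = check_correctness(problem, completion, timeout=3.0)
    print(result)  # expected: {'task_id': 'Toy/0', 'passed': True, 'result': 'passed', 'completion_id': None}

The check program executed in the child process is simply prompt + completion + test + "check(entry_point)", with `reliability_guard` disabling destructive builtins in that process first.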
evalscope/benchmarks/infovqa/__init__.py (file without changes)
evalscope/benchmarks/infovqa/infovqa_adapter.py (new file)
@@ -0,0 +1,66 @@
+import json
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator.state import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+PROMPT = """Answer the question according to the image using a single word or phrase.
+{question}
+The last line of your response should be of the form "ANSWER: $ANSWER" (without quotes) where $ANSWER is the answer to the question."""  # noqa: E501
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='infovqa',
+        pretty_name='InfoVQA',
+        tags=[Tags.MULTI_MODAL, Tags.KNOWLEDGE, Tags.QA],
+        description=
+        'InfoVQA (Information Visual Question Answering) is a benchmark designed to evaluate how well AI models can answer questions based on information-dense images, such as charts, graphs, diagrams, maps, and infographics.',  # noqa: E501
+        dataset_id='lmms-lab/DocVQA',
+        subset_list=['InfographicVQA'],
+        metric_list=['anls'],
+        eval_split='validation',
+        prompt_template=PROMPT,
+    )
+)
+class InfoVQAAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.add_aggregation_name = False
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+
+        input_text = PROMPT.format(question=record['question'])
+        content_list: List[Content] = [ContentText(text=input_text)]
+        image = record.get('image')
+        if image:
+            image_base64 = bytes_to_base64(image['bytes'], format='png', add_header=True)
+            content_list.append(ContentImage(image=image_base64))
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            target=json.dumps(record.get('answers')),  # answers is a list
+            metadata={
+                'questionId': record.get('questionId'),
+                'answer_type': record.get('answer_type'),
+                'image_url': record.get('image_url'),
+                'ocr': record.get('ocr'),
+            }
+        )
+
+    def extract_answer(self, prediction: str, task_state: TaskState) -> str:
+        import re

+        pattern = r'ANSWER:\s*(.*)'
+        match = re.search(pattern, prediction)
+        if match:
+            return match.group(1).strip()
+        return prediction.strip()
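For clarity, the prompt/answer round trip the adapter expects reduces to this standalone snippet (the prediction text is made up):

import re

prediction = 'The infographic lists three regions.\nANSWER: 3'

match = re.search(r'ANSWER:\s*(.*)', prediction)
extracted = match.group(1).strip() if match else prediction.strip()
print(extracted)  # -> '3'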
evalscope/benchmarks/live_code_bench/evaluate_utils.py
@@ -9,6 +9,19 @@ from .pass_k_utils import compute_metrics_from_results
 logger = get_logger()
 
 
+def _temp_run(sample, generation, debug, result, metadata_list, timeout):
+    """Runs a test in a separate process to enforce a timeout.
+    This function is defined at the module's top level to ensure it can be
+    pickled by `multiprocessing.Process`. This is a requirement on platforms
+    like macOS (on Apple Silicon) which use the 'spawn' start method, as
+    nested functions are not picklable.
+    """
+    from .testing_util import run_test
+    res, metadata = run_test(sample, test=generation, debug=debug, timeout=timeout)
+    result.append(res)
+    metadata_list.append(metadata)
+
+
 def codegen_check_correctness(sample, generation, timeout, debug=True):
     """Check correctness of code generation with a global timeout.
 
@@ -16,12 +29,6 @@ def codegen_check_correctness(sample, generation, timeout, debug=True):
     timeouts inside `run_test`
     """
 
-    def _temp_run(sample, generation, debug, result, metadata_list, timeout):
-        from .testing_util import run_test
-        res, metadata = run_test(sample, test=generation, debug=debug, timeout=timeout)
-        result.append(res)
-        metadata_list.append(metadata)
-
     manager = multiprocessing.Manager()
     result = manager.list()
     metadata_list = manager.list()
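The docstring's point about picklability can be reproduced with a minimal, self-contained sketch; the names below are illustrative and not part of evalscope:

import multiprocessing as mp

def _worker(result):
    # Module-level functions are importable by name, so they can be pickled
    # under the 'spawn' start method (the default on macOS and Windows).
    result.append('ok')

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    with ctx.Manager() as manager:
        result = manager.list()
        p = ctx.Process(target=_worker, args=(result,))
        p.start()
        p.join()
        print(list(result))  # ['ok']
    # A function nested inside another function, by contrast, cannot be located
    # by the child interpreter and raises "Can't pickle local object" under 'spawn',
    # which is why _temp_run was hoisted to module scope.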
evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py
@@ -7,7 +7,7 @@ from evalscope.api.messages.chat_message import ChatMessageUser
 from evalscope.api.metric import Score
 from evalscope.api.registry import register_benchmark
 from evalscope.constants import Tags
-from evalscope.utils.io_utils import convert_numpy_types
+from evalscope.utils.io_utils import convert_normal_types
 from evalscope.utils.logger import get_logger
 
 logger = get_logger()
@@ -26,10 +26,10 @@ logger = get_logger()
         eval_split='test',
         prompt_template=
         '### Question:\n{question_content}\n\n{format_prompt} ### Answer: (use the provided format with backticks)\n\n',
+        review_timeout=6,
         extra_params={
             'start_date': None,
             'end_date': None,
-            'timeout': 6,
             'debug': False
         },
     )
@@ -42,7 +42,6 @@ class LiveCodeBenchAdapter(DefaultDataAdapter):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
-        self.timeout = self.extra_params.get('timeout', 6)
        self.debug = self.extra_params.get('debug', False)
        self.start_date = self.extra_params.get('start_date')
        self.end_date = self.extra_params.get('end_date')
@@ -81,45 +80,69 @@ class LiveCodeBenchAdapter(DefaultDataAdapter):
     def match_score(
         self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
     ) -> Score:
-        from .evaluate_utils import codegen_metrics
-
         score = Score(
             extracted_prediction=filtered_prediction,
             prediction=original_prediction,
         )
 
-        references = [{'input_output': task_state.metadata['evaluation_sample']}]
-        predictions = [[filtered_prediction]]
-
-        try:
-            metrics, eval_results, final_metadata = codegen_metrics(
-                references,
-                predictions,
-                k_list=[1],
-                num_process_evaluate=1,
-                timeout=self.timeout,
-                debug=self.debug,
-            )
-            pass_rate = metrics['pass@1'] / 100  # convert to point scale
-
-            score.value = {'pass': float(pass_rate > 0)}
-            score.explanation = f"Pass@1: {metrics['pass@1']}%"
-
-            # Convert numpy types to native Python types for JSON serialization
-            serializable_eval_results = convert_numpy_types(eval_results)
-            serializable_final_metadata = convert_numpy_types(final_metadata)
-
-            score.metadata = {
-                'pass_rate': float(pass_rate),
-                'timeout': self.timeout,
-                'debug': self.debug,
-                'eval_results': serializable_eval_results,
-                'final_metadata': serializable_final_metadata
-            }
-        except Exception as e:
-            score.value = {'pass': False}
-            score.explanation = f'Evaluation failed: {str(e)}'
-            score.metadata = {'error': str(e)}
+        if not self.use_sandbox:
+            # Use original evaluation method
+            from .evaluate_utils import codegen_metrics
+
+            references = [{'input_output': task_state.metadata['evaluation_sample']}]
+            predictions = [[filtered_prediction]]
+
+            try:
+                metrics, eval_results, final_metadata = codegen_metrics(
+                    references,
+                    predictions,
+                    k_list=[1],
+                    num_process_evaluate=1,
+                    timeout=self.review_timeout,
+                    debug=self.debug,
+                )
+                pass_rate = metrics['pass@1'] / 100  # convert to point scale
+
+                score.value = {'pass': float(pass_rate > 0)}
+                score.explanation = f"Pass@1: {metrics['pass@1']}%"
+
+                # Convert numpy types to native Python types for JSON serialization
+                serializable_eval_results = convert_normal_types(eval_results)
+                serializable_final_metadata = convert_normal_types(final_metadata)
+
+                score.metadata = {
+                    'pass_rate': float(pass_rate),
+                    'timeout': self.review_timeout,
+                    'debug': self.debug,
+                    'eval_results': serializable_eval_results,
+                    'final_metadata': serializable_final_metadata
+                }
+            except Exception as e:
+                score.value = {'pass': False}
+                score.explanation = f'Evaluation failed: {str(e)}'
+                score.metadata = {'error': str(e)}
+        else:
+            # Use sandbox execution
+            try:
+                from .sandbox_evaluate_utils import evaluate_in_sandbox
+
+                evaluation_sample = task_state.metadata['evaluation_sample']
+                passed, detailed_results = evaluate_in_sandbox(
+                    self, filtered_prediction, evaluation_sample, timeout=self.review_timeout, debug=self.debug
+                )
+
+                score.value = {'pass': passed}
+                score.explanation = f"Sandbox execution: {'Passed' if passed else 'Failed'}"
+                score.metadata = {
+                    'timeout': self.review_timeout,
+                    'debug': self.debug,
+                    'execution_method': 'sandbox',
+                    'detailed_results': detailed_results
+                }
+            except Exception as e:
+                score.value = {'pass': False}
+                score.explanation = f'Sandbox evaluation failed: {str(e)}'
+                score.metadata = {'error': str(e), 'execution_method': 'sandbox'}
 
         score.main_score_name = 'pass'
         return score
evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py (new file)
@@ -0,0 +1,220 @@
+import json
+from typing import TYPE_CHECKING, Dict, List, Tuple
+
+from evalscope.utils.logger import get_logger
+
+if TYPE_CHECKING:
+    from evalscope.api.mixin.sandbox_mixin import SandboxMixin
+
+logger = get_logger()
+
+
+def evaluate_in_sandbox(
+    adapter: 'SandboxMixin',
+    code: str,
+    evaluation_sample: str,
+    timeout: int = 6,
+    debug: bool = False
+) -> Tuple[bool, Dict]:
+    """
+    Evaluate code in sandbox environment for Live Code Bench.
+
+    Args:
+        adapter: The adapter instance with sandbox capabilities
+        code: The code to evaluate
+        evaluation_sample: JSON string containing input/output test cases
+        timeout: Timeout for execution
+        debug: Whether to enable debug logging
+
+    Returns:
+        Tuple[bool, Dict]: (overall_pass, detailed_results)
+    """
+    try:
+        # Parse the evaluation sample
+        test_data = json.loads(evaluation_sample)
+        inputs = test_data.get('inputs', [])
+        outputs = test_data.get('outputs', [])
+        fn_name = test_data.get('fn_name')
+
+        if debug:
+            logger.info(f'Evaluating code with {len(inputs)} test cases')
+            logger.info(f'Function name: {fn_name}')
+
+        # Determine if this is call-based or stdio-based
+        if fn_name:
+            # Call-based evaluation
+            return _evaluate_call_based_in_sandbox(adapter, code, inputs, outputs, fn_name, timeout, debug)
+        else:
+            # Standard input/output evaluation
+            return _evaluate_stdio_in_sandbox(adapter, code, inputs, outputs, timeout, debug)
+
+    except Exception as e:
+        if debug:
+            logger.error(f'Sandbox evaluation error: {str(e)}')
+        return False, {'error': str(e), 'total_tests': 0, 'passed_tests': 0}
+
+
+def _evaluate_call_based_in_sandbox(
+    adapter: 'SandboxMixin', code: str, inputs: list, outputs: list, fn_name: str, timeout: int, debug: bool
+) -> Tuple[bool, Dict]:
+    """Evaluate call-based problems in sandbox."""
+    try:
+        all_passed = True
+        passed_count = 0
+        failed_cases = []
+
+        for i, (test_input, expected_output) in enumerate(zip(inputs, outputs)):
+            # Prepare individual test code for each test case
+            test_code = f"""
+import json
+import sys
+
+# User's code
+{code}
+
+# Test execution for single test case
+try:
+    test_input = {repr(test_input)}
+    expected_output = {repr(expected_output)}
+
+    if 'class Solution' in '''{code}''':
+        # LeetCode style
+        solution = Solution()
+        method = getattr(solution, '{fn_name}')
+    else:
+        # Function is directly available
+        method = {fn_name}
+
+    # Parse input if it's JSON string
+    if isinstance(test_input, str):
+        try:
+            test_input = json.loads(test_input)
+        except:
+            pass  # Keep as string if not valid JSON
+
+    # Call the method
+    if isinstance(test_input, list):
+        result = method(*test_input)
+    else:
+        result = method(test_input)
+
+    # Parse expected output if it's JSON string
+    if isinstance(expected_output, str):
+        try:
+            expected_output = json.loads(expected_output)
+        except:
+            pass  # Keep as string if not valid JSON
+
+    # Convert tuple to list for comparison
+    if isinstance(result, tuple):
+        result = list(result)
+
+    if result == expected_output:
+        print("TEST_PASSED")
+    else:
+        print(f"TEST_FAILED: expected {{expected_output}}, got {{result}}")
+
+except Exception as e:
+    print(f"EXECUTION_ERROR: {{str(e)}}")
+    import traceback
+    traceback.print_exc()
+"""
+
+            # Execute in sandbox
+            result = adapter.execute_code_in_sandbox(code=test_code, timeout=timeout, language='python')
+
+            if debug:
+                logger.info(f'Test case {i} execution result: {result}')
+
+            # Check if execution was successful and test passed
+            if result.get('status') == 'success':
+                output = result.get('output', '')
+                if 'TEST_PASSED' in output:
+                    passed_count += 1
+                elif 'TEST_FAILED:' in output:
+                    # Extract failure details from output
+                    for line in output.split('\n'):
+                        if line.startswith('TEST_FAILED:'):
+                            failed_cases.append(f"Test {i}: {line.replace('TEST_FAILED: ', '')}")
+                            break
+                    all_passed = False
+                    break
+                elif 'EXECUTION_ERROR:' in output:
+                    # Extract error details
+                    for line in output.split('\n'):
+                        if line.startswith('EXECUTION_ERROR:'):
+                            failed_cases.append(f'Test {i}: {line}')
+                            break
+                    all_passed = False
+                    break
+                else:
+                    failed_cases.append(f'Test {i}: Unknown error in output. Result: {result}')
+                    all_passed = False
+                    break
+            else:
+                failed_cases.append(f'Test {i}: Sandbox execution failed - Result: {result}')
+                all_passed = False
+                break
+
+        detailed_results = {'total_tests': len(inputs), 'passed_tests': passed_count, 'failed_cases': failed_cases}
+
+        return all_passed, detailed_results
+
+    except Exception as e:
+        if debug:
+            logger.error(f'Call-based evaluation error: {str(e)}')
+        return False, {'error': str(e), 'total_tests': len(inputs), 'passed_tests': 0}
+
+
+def _evaluate_stdio_in_sandbox(
+    adapter: 'SandboxMixin', code: str, inputs: list, outputs: list, timeout: int, debug: bool
+) -> Tuple[bool, Dict]:
+    """Evaluate stdio-based problems in sandbox."""
+    try:
+        all_passed = True
+        passed_count = 0
+        failed_cases = []
+
+        for i, (test_input, expected_output) in enumerate(zip(inputs, outputs)):
+            test_code = f"""
+import sys
+from io import StringIO
+
+# Redirect stdin
+sys.stdin = StringIO('''{test_input}''')
+
+# User's code
+{code}
+"""
+
+            # Execute in sandbox
+            result = adapter.execute_code_in_sandbox(code=test_code, timeout=timeout, language='python')
+
+            if result.get('status') != 'success':
+                if debug:
+                    logger.error(f'Test case {i} execution failed: {result}')
+                failed_cases.append(f'Test {i}: Execution error - Result: {result}')
+                all_passed = False
+                break
+
+            # Compare output
+            actual_output = result.get('output', '').strip()
+            expected_output = expected_output.strip()
+
+            if actual_output == expected_output:
+                passed_count += 1
+            else:
+                if debug:
+                    logger.info(f"Test case {i} failed: expected '{expected_output}', got '{actual_output}'")
+                failed_cases.append(f"Test {i}: Expected '{expected_output}', got '{actual_output}'")
+                all_passed = False
+                break
+
+        detailed_results = {'total_tests': len(inputs), 'passed_tests': passed_count, 'failed_cases': failed_cases}
+
+        return all_passed, detailed_results
+
+    except Exception as e:
+        if debug:
+            logger.error(f'Stdio evaluation error: {str(e)}')
+        return False, {'error': str(e), 'total_tests': len(inputs), 'passed_tests': 0}
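A rough usage sketch of `evaluate_in_sandbox`: it only needs an adapter object exposing `execute_code_in_sandbox(code, timeout, language)` that returns a dict with 'status' and 'output' keys, as the module above assumes. The `LocalStandInSandbox` below is a hypothetical stand-in that runs code in a local subprocess purely for illustration; the real adapter gets this method from `SandboxMixin`.

import json
import subprocess
import sys

from evalscope.benchmarks.live_code_bench.sandbox_evaluate_utils import evaluate_in_sandbox

class LocalStandInSandbox:
    """Illustrative stand-in; the real adapter mixes in SandboxMixin."""

    def execute_code_in_sandbox(self, code: str, timeout: int, language: str = 'python') -> dict:
        # Run the generated test program locally and mimic the expected result shape.
        proc = subprocess.run([sys.executable, '-c', code], capture_output=True, text=True, timeout=timeout)
        status = 'success' if proc.returncode == 0 else 'error'
        return {'status': status, 'output': proc.stdout + proc.stderr}

evaluation_sample = json.dumps({
    'inputs': ['3 4\n'],   # one stdio test case (made up)
    'outputs': ['7\n'],
    'fn_name': None,       # no fn_name -> stdio-style evaluation path
})
code = 'a, b = map(int, input().split())\nprint(a + b)'

passed, details = evaluate_in_sandbox(LocalStandInSandbox(), code, evaluation_sample, timeout=6)
print(passed, details)  # roughly: True {'total_tests': 1, 'passed_tests': 1, 'failed_cases': []}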