evalscope 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (87)
  1. evalscope/api/benchmark/adapters/default_data_adapter.py +6 -4
  2. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  3. evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
  4. evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
  5. evalscope/api/benchmark/benchmark.py +27 -2
  6. evalscope/api/benchmark/meta.py +3 -0
  7. evalscope/api/evaluator/evaluator.py +5 -0
  8. evalscope/api/evaluator/state.py +5 -0
  9. evalscope/api/messages/chat_message.py +6 -1
  10. evalscope/api/mixin/__init__.py +1 -0
  11. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  12. evalscope/api/mixin/sandbox_mixin.py +204 -0
  13. evalscope/api/model/generate_config.py +0 -3
  14. evalscope/api/model/model.py +1 -1
  15. evalscope/api/tool/tool_info.py +1 -1
  16. evalscope/arguments.py +6 -0
  17. evalscope/benchmarks/ai2d/__init__.py +0 -0
  18. evalscope/benchmarks/ai2d/ai2d_adapter.py +53 -0
  19. evalscope/benchmarks/amc/__init__.py +0 -0
  20. evalscope/benchmarks/amc/amc_adapter.py +46 -0
  21. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  22. evalscope/benchmarks/bfcl/bfcl_adapter.py +141 -2
  23. evalscope/benchmarks/bfcl/generation.py +7 -7
  24. evalscope/benchmarks/drop/drop_adapter.py +1 -1
  25. evalscope/benchmarks/healthbench/__init__.py +0 -0
  26. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  27. evalscope/benchmarks/healthbench/utils.py +102 -0
  28. evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
  29. evalscope/benchmarks/humaneval/utils.py +235 -0
  30. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  31. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
  32. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  33. evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
  34. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  35. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
  36. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  37. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  38. evalscope/benchmarks/mm_star/__init__.py +0 -0
  39. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  40. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
  41. evalscope/benchmarks/multi_if/__init__.py +0 -0
  42. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  43. evalscope/benchmarks/multi_if/metrics.py +120 -0
  44. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  45. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
  46. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  47. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  48. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  49. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  50. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  51. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  52. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  53. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
  54. evalscope/config.py +24 -1
  55. evalscope/constants.py +3 -0
  56. evalscope/evaluator/evaluator.py +25 -7
  57. evalscope/metrics/metric.py +27 -2
  58. evalscope/models/model_apis.py +10 -8
  59. evalscope/models/utils/openai.py +1 -2
  60. evalscope/perf/arguments.py +2 -0
  61. evalscope/perf/plugin/api/base.py +2 -2
  62. evalscope/perf/plugin/api/default_api.py +7 -7
  63. evalscope/perf/plugin/api/openai_api.py +83 -19
  64. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  65. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  66. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  67. evalscope/perf/utils/benchmark_util.py +1 -2
  68. evalscope/report/combinator.py +0 -25
  69. evalscope/report/report.py +8 -4
  70. evalscope/run.py +1 -1
  71. evalscope/utils/function_utils.py +41 -0
  72. evalscope/utils/import_utils.py +63 -13
  73. evalscope/utils/io_utils.py +19 -11
  74. evalscope/utils/json_schema.py +23 -2
  75. evalscope/utils/logger.py +19 -0
  76. evalscope/utils/model_utils.py +1 -1
  77. evalscope/version.py +2 -2
  78. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/METADATA +6 -10
  79. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/RECORD +87 -59
  80. tests/benchmark/test_eval.py +51 -7
  81. tests/benchmark/test_sandbox.py +81 -0
  82. tests/benchmark/test_vlm.py +60 -3
  83. tests/perf/test_perf.py +40 -12
  84. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/LICENSE +0 -0
  85. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/WHEEL +0 -0
  86. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/entry_points.txt +0 -0
  87. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/top_level.txt +0 -0
evalscope/api/benchmark/adapters/default_data_adapter.py CHANGED
@@ -642,9 +642,7 @@ class DefaultDataAdapter(DataAdapter):
         """
         pass

-    def _on_generate_report(
-        self, scores: Dict[str, List[AggScore]], model_name: str, add_aggregation_name: bool = True
-    ) -> Report:
+    def _on_generate_report(self, scores: Dict[str, List[AggScore]], model_name: str) -> Report:
         """
         Hook method called during report generation.

@@ -660,7 +658,7 @@ class DefaultDataAdapter(DataAdapter):
             Report: The generated evaluation report
         """
         return ReportGenerator.generate_report(
-            score_dict=scores, model_name=model_name, data_adapter=self, add_aggregation_name=add_aggregation_name
+            score_dict=scores, model_name=model_name, data_adapter=self, add_aggregation_name=self.add_aggregation_name
         )

     @override
@@ -682,3 +680,7 @@ class DefaultDataAdapter(DataAdapter):
         report = self._on_generate_report(scores, model_name=model_name)
         self._on_generate_report_end(report, output_dir, **kwargs)
         return report
+
+    def finalize(self, *args, **kwargs):
+        # Finalize the evaluation process
+        self.sandbox_finalize(*args, **kwargs)
evalscope/api/benchmark/adapters/multi_choice_adapter.py CHANGED
@@ -18,8 +18,11 @@ class MultiChoiceAdapter(DefaultDataAdapter):
     This adapter formats the input for multi-choice questions and handles few-shot examples.
     """

-    multiple_correct: bool = False
-    """Whether the benchmark allows multiple correct answers."""
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.multiple_correct: bool = False
+        """Whether the benchmark allows multiple correct answers."""

     def format_prompt_template(self, sample: Sample) -> str:
         """
evalscope/api/benchmark/adapters/text2image_adapter.py CHANGED
@@ -19,6 +19,11 @@ logger = get_logger()
 class Text2ImageAdapter(DefaultDataAdapter):
     """Text to Image Adapter for benchmarks."""

+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.add_aggregation_name = False  # Do not add aggregation name in the report by default
+
     def load_from_disk(self, **kwargs):
         return super().load_from_disk(use_local_loader=True)

@@ -150,7 +155,3 @@ class Text2ImageAdapter(DefaultDataAdapter):
                 score.metadata[metric_name] = f'error: {str(e)}'

         return score
-
-    def _on_generate_report(self, scores, model_name, add_aggregation_name=True):
-        # Don't add aggregation name for needle haystack adapter
-        return super()._on_generate_report(scores, model_name, False)
evalscope/api/benchmark/adapters/vision_language_adapter.py CHANGED
@@ -3,4 +3,6 @@ from .default_data_adapter import DefaultDataAdapter

 class VisionLanguageAdapter(DefaultDataAdapter):
     """Adapter for vision-language benchmarks. e.g., image captioning, visual question answering, etc."""
-    pass
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
evalscope/api/benchmark/benchmark.py CHANGED
@@ -9,7 +9,7 @@ from evalscope.api.dataset import DatasetDict, Sample
 from evalscope.api.evaluator import TaskState
 from evalscope.api.filter import FilterEnsemble, build_filter_ensemble
 from evalscope.api.metric import AggScore, SampleScore
-from evalscope.api.mixin import LLMJudgeMixin
+from evalscope.api.mixin import LLMJudgeMixin, SandboxMixin
 from evalscope.api.model import Model
 from evalscope.report import Report
 from evalscope.utils.logger import get_logger
@@ -21,7 +21,7 @@ if TYPE_CHECKING:
 logger = get_logger()


-class DataAdapter(LLMJudgeMixin, ABC):
+class DataAdapter(LLMJudgeMixin, SandboxMixin, ABC):
     """
     Data Adapter for the benchmark.
     """
@@ -43,6 +43,12 @@ class DataAdapter(LLMJudgeMixin, ABC):
         self.save_metadata = True
         """Whether to save metadata in the review result"""

+        self.add_aggregation_name = True
+        """Whether to add aggregation name in the report"""
+
+        self.add_overall_metric = True
+        """Whether to add overall metric in the report"""
+
         self.category_map = {}
         """Category map for the benchmark"""

@@ -86,6 +92,11 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         pass

+    @abstractmethod
+    def finalize(self, *args, **kwargs) -> None:
+        """Finalize the evaluation process."""
+        pass
+
     @property
     def name(self) -> str:
         """
@@ -334,6 +345,20 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         self._benchmark_meta.shuffle_choices = value

+    @property
+    def review_timeout(self) -> Optional[float]:
+        """
+        Return the timeout for the review process.
+        """
+        return self._benchmark_meta.review_timeout
+
+    @review_timeout.setter
+    def review_timeout(self, value: float):
+        """
+        Set the timeout for the review process.
+        """
+        self._benchmark_meta.review_timeout = value
+
     @contextlib.contextmanager
     def _temporary_attribute(self, attr_name: str, new_value):
         """
evalscope/api/benchmark/meta.py CHANGED
@@ -79,6 +79,9 @@ class BenchmarkMeta:
     shuffle_choices: bool = False
     """Whether to shuffle the choices in multiple-choice datasets."""

+    review_timeout: Optional[float] = None
+    """ Timeout for review in seconds."""
+
     extra_params: Dict = field(default_factory=dict)
     """ Additional parameters for the benchmark."""

evalscope/api/evaluator/evaluator.py CHANGED
@@ -54,3 +54,8 @@ class Evaluator(abc.ABC):
     def get_report(self, *args, **kwargs) -> Report:
         """Get the evaluation report."""
         pass
+
+    @abc.abstractmethod
+    def finalize(self, *args, **kwargs) -> None:
+        """Finalize the evaluation process."""
+        pass
evalscope/api/evaluator/state.py CHANGED
@@ -273,3 +273,8 @@ class TaskState:
     def target(self) -> str:
         """The scoring target for this `Sample`."""
         return self._target.text
+
+    @target.setter
+    def target(self, text: str) -> None:
+        """Set the target for review purposes."""
+        self._target = Target(text)
evalscope/api/messages/chat_message.py CHANGED
@@ -3,7 +3,7 @@ from pydantic import BaseModel, Field, JsonValue, model_validator
 from typing import Any, Dict, List, Literal, Optional, Type, Union

 from evalscope.api.tool import ToolCall, ToolCallError
-from .content import Content, ContentImage, ContentReasoning, ContentText
+from .content import Content, ContentAudio, ContentImage, ContentReasoning, ContentText
 from .utils import parse_content_with_reasoning


@@ -225,6 +225,11 @@ def messages_to_markdown(messages: List[ChatMessage], max_length: Optional[int]
                 if max_length and len(image_base64) > max_length:
                     image_base64 = image_base64[:max_length]
                 content_parts.append(f'![image]({image_base64})')
+            elif isinstance(content_item, ContentAudio):
+                audio_base64 = content_item.audio
+                if max_length and len(audio_base64) > max_length:
+                    audio_base64 = audio_base64[:max_length]
+                content_parts.append(f"<audio controls src='{audio_base64}'></audio>")
             elif isinstance(content_item, ContentReasoning):
                 content_parts.append(f'**Reasoning:** {content_item.reasoning}')

evalscope/api/mixin/__init__.py CHANGED
@@ -1 +1,2 @@
 from .llm_judge_mixin import LLMJudgeMixin
+from .sandbox_mixin import SandboxMixin
evalscope/api/mixin/llm_judge_mixin.py CHANGED
@@ -24,6 +24,8 @@ class LLMJudgeMixin:

         self._llm_judge: Optional[LLMJudge] = None

+        super().__init__(task_config=task_config)
+
     @property
     def llm_judge(self) -> Optional[LLMJudge]:
         """Get LLM judge instance with lazy initialization."""
evalscope/api/mixin/sandbox_mixin.py ADDED
@@ -0,0 +1,204 @@
+import asyncio
+import threading
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from evalscope.utils.logger import get_logger
+
+if TYPE_CHECKING:
+    from ms_enclave.sandbox.manager import SandboxManager
+
+    from evalscope.config import TaskConfig
+
+logger = get_logger()
+
+
+class SandboxMixin:
+    """Sandbox mixin for sandboxed code execution."""
+
+    def __init__(self, task_config: 'TaskConfig'):
+        self._task_config = task_config
+
+        self._manager: Optional['SandboxManager'] = None
+        """Sandbox manager instance."""
+
+        self._sandbox_id: Optional[str] = None
+        """Sandbox ID."""
+
+        self._loop: Optional[asyncio.AbstractEventLoop] = None
+        """Event loop for async operations."""
+
+        # Initialize sandbox synchronously by running async methods
+        if self.use_sandbox:
+            self._loop = asyncio.new_event_loop()
+
+            # Start the loop in a separate thread
+            def run_loop():
+                asyncio.set_event_loop(self._loop)
+                self._loop.run_forever()
+
+            self._loop_thread = threading.Thread(target=run_loop, daemon=True)
+            self._loop_thread.start()
+
+            # Wait for initialization
+            future = asyncio.run_coroutine_threadsafe(self._async_init(), self._loop)
+            future.result()
+
+        super().__init__()
+
+    async def _async_init(self):
+        """Async initialization helper."""
+        await self.init_sandbox_manager_async()
+        await self.init_sandbox_async()
+
+    @property
+    def use_sandbox(self) -> bool:
+        """
+        Return whether to use sandbox for the benchmark.
+        """
+        if not self._task_config:
+            return False
+        else:
+            return self._task_config.use_sandbox
+
+    @property
+    def sandbox_manager(self) -> Optional['SandboxManager']:
+        """Get the sandbox manager instance."""
+        return self._manager
+
+    @property
+    def sandbox_id(self) -> Optional[str]:
+        """Get the sandbox ID."""
+        return self._sandbox_id
+
+    async def init_sandbox_manager_async(self) -> Optional['SandboxManager']:
+        """Initialize the sandbox manager asynchronously."""
+        if self._manager is not None:
+            return self._manager
+
+        if not self.use_sandbox:
+            return None
+
+        from ms_enclave.sandbox.manager import HttpSandboxManager, LocalSandboxManager
+
+        manager_config = self._task_config.sandbox_manager_config or {}
+        if manager_config.get('base_url'):
+            # Remote manager
+            self._manager = HttpSandboxManager(**manager_config)
+        else:
+            # Local manager
+            self._manager = LocalSandboxManager(**manager_config)
+
+        await self._manager.start()
+        logger.info('Sandbox manager initialized.')
+        return self._manager
+
+    def init_sandbox_manager(self) -> Optional['SandboxManager']:
+        """Initialize the sandbox manager."""
+        if self._manager is not None:
+            return self._manager
+
+        if not self.use_sandbox:
+            return None
+
+        # Use the dedicated loop if available
+        if self._loop and not self._loop.is_closed():
+            future = asyncio.run_coroutine_threadsafe(self.init_sandbox_manager_async(), self._loop)
+            return future.result()
+        else:
+            # Fallback for cases where no loop is available
+            return asyncio.run(self.init_sandbox_manager_async())
+
+    async def init_sandbox_async(self) -> Optional[str]:
+        """Initialize the sandbox instance asynchronously."""
+        if self._sandbox_id is not None:
+            return self._sandbox_id
+
+        if not self.use_sandbox:
+            return None
+
+        from ms_enclave.sandbox.model import DockerSandboxConfig, SandboxType
+
+        sandbox_config = self._task_config.sandbox_config or DockerSandboxConfig(
+            image='python:3.11-slim', tools_config={
+                'shell_executor': {},
+                'python_executor': {}
+            }
+        )
+        sandbox_type = self._task_config.sandbox_type or SandboxType.DOCKER
+
+        self._sandbox_id = await self._manager.create_sandbox(sandbox_type=sandbox_type, config=sandbox_config)
+
+        sandbox_info = await self._manager.get_sandbox_info(self._sandbox_id)
+
+        logger.info(f'Sandbox of type {sandbox_type} initialized. Info: {sandbox_info.model_dump(exclude_none=True)}')
+        return self._sandbox_id
+
+    def init_sandbox(self) -> Optional[str]:
+        """Initialize the sandbox instance."""
+        if self._sandbox_id is not None:
+            return self._sandbox_id
+
+        if not self.use_sandbox:
+            return None
+
+        # Use the dedicated loop if available
+        if self._loop and not self._loop.is_closed():
+            future = asyncio.run_coroutine_threadsafe(self.init_sandbox_async(), self._loop)
+            return future.result()
+        else:
+            # Fallback for cases where no loop is available
+            return asyncio.run(self.init_sandbox_async())
+
+    def execute_code_in_sandbox(self, code: str, timeout: int = 60, language: str = 'python') -> Dict[str, Any]:
+        """Execute code in the sandbox."""
+        if not self._sandbox_id or not self._manager:
+            logger.warning('Sandbox is not initialized.')
+            return {'error': 'Sandbox is not initialized.'}
+
+        from ms_enclave.sandbox.model import ExecutionStatus, ToolResult
+
+        async def _execute_async():
+            if language.lower() == 'python':
+                tool_name = 'python_executor'
+                parameters = {'code': code, 'timeout': timeout}
+                result = await self._manager.execute_tool(self._sandbox_id, tool_name, parameters)
+            elif language.lower() == 'shell':
+                tool_name = 'shell_executor'
+                parameters = {'command': code, 'timeout': timeout}
+                result = await self._manager.execute_tool(self._sandbox_id, tool_name, parameters)
+            else:
+                logger.warning(f"Unsupported language: {language}. Supported languages are 'python' and 'shell'.")
+                result = ToolResult(
+                    status=ExecutionStatus.ERROR,
+                    tool_name='code_executor',
+                    output=f"Unsupported language: {language}. Supported languages are 'python' and 'shell'."
+                )
+            return result
+
+        # Use the dedicated loop if available
+        if self._loop and not self._loop.is_closed():
+            future = asyncio.run_coroutine_threadsafe(_execute_async(), self._loop)
+            result = future.result(timeout + 10)  # Add some buffer to the timeout
+        else:
+            # Fallback for cases where no loop is available
+            result = asyncio.run(_execute_async())
+
+        return result.model_dump(exclude_none=True)
+
+    def sandbox_finalize(self, *args, **kwargs):
+        """Finalize the sandbox manager."""
+        if self._manager:
+            try:
+                if self._loop and not self._loop.is_closed():
+                    # Stop the manager using the dedicated loop
+                    future = asyncio.run_coroutine_threadsafe(self._manager.stop(), self._loop)
+                    future.result(timeout=30)

+                    # Stop the event loop
+                    self._loop.call_soon_threadsafe(self._loop.stop)
+                    if hasattr(self, '_loop_thread'):
+                        self._loop_thread.join(timeout=5)
+
+                logger.info('Sandbox manager finalized.')
+            except Exception as e:
+                logger.warning(f'Error finalizing sandbox manager: {e}')
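Because DataAdapter now inherits SandboxMixin (see the benchmark.py hunk above), benchmark adapters can call these helpers directly. The following is an illustrative sketch only, not code from this release: the adapter class, its method, and the keys checked on the returned dict are assumptions; only the mixin methods and the --use-sandbox switch come from the diff.

# Hypothetical adapter sketch exercising the SandboxMixin API shown above.
from evalscope.api.benchmark import DefaultDataAdapter


class MyCodeEvalAdapter(DefaultDataAdapter):  # assumed custom adapter, not part of evalscope
    """Runs model-generated code inside the sandbox when --use-sandbox is enabled."""

    def run_generated_code(self, code: str) -> dict:
        if not self.use_sandbox:  # property provided by SandboxMixin
            return {'error': 'sandbox disabled'}
        self.init_sandbox()  # lazily creates the Docker sandbox if needed
        # Returns ToolResult.model_dump(); exact keys/values are defined by ms_enclave.
        return self.execute_code_in_sandbox(code, timeout=30, language='python')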
evalscope/api/model/generate_config.py CHANGED
@@ -36,9 +36,6 @@ class GenerateConfig(BaseModel):
     stream: Optional[bool] = Field(default=None)
     """Whether to stream the response (default is model specific)."""

-    system_message: Optional[str] = Field(default=None)
-    """Override the default system message."""
-
     max_tokens: Optional[int] = Field(default=None)
     """The maximum number of tokens that can be generated in the completion (default is model specific)."""

evalscope/api/model/model.py CHANGED
@@ -365,7 +365,7 @@ def get_model(

    logger.info(
        f'Creating model {model} with eval_type={eval_type} '
-        f'base_url={base_url}, api_key={api_key}, config={config}, model_args={model_args}'
+        f'base_url={base_url}, config={config.model_dump(exclude_none=True)}, model_args={model_args}'
    )

    # find a matching model type
evalscope/api/tool/tool_info.py CHANGED
@@ -1,7 +1,7 @@
 import inspect
 from dataclasses import dataclass
 from docstring_parser import Docstring, parse
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
 from typing import Any, Callable, Dict, List, Literal, Optional, TypeAlias, Union, get_args, get_type_hints

 from evalscope.utils.json_schema import JSONSchema, JSONType, json_schema, python_type_to_json_type
evalscope/arguments.py CHANGED
@@ -87,6 +87,12 @@ def add_argument(parser: argparse.ArgumentParser):
     parser.add_argument('--judge-model-args', type=json.loads, default='{}', help='The judge model args, should be a json string.')  # noqa: E501
     parser.add_argument('--judge-worker-num', type=int, default=1, help='The number of workers for the judge model.')
     parser.add_argument('--analysis-report', action='store_true', default=False, help='Generate analysis report for the evaluation results using judge model.')  # noqa: E501
+
+    # Sandbox-related arguments
+    parser.add_argument('--use-sandbox', action='store_true', default=False, help='Whether to use sandbox for model evaluation.')  # noqa: E501
+    parser.add_argument('--sandbox-type', type=str, default='docker', help='The sandbox type to use.')  # noqa: E501
+    parser.add_argument('--sandbox-config', type=json.loads, default='{}', help='The sandbox config, should be a json string.')  # noqa: E501
+    parser.add_argument('--sandbox-manager-config', type=json.loads, default='{}', help='The sandbox manager config, should be a json string.')  # noqa: E501
     # yapf: enable


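The same options are exposed programmatically. Below is a rough sketch of enabling the sandbox from Python, assuming the new TaskConfig fields in config.py mirror these CLI flags; the model id and dataset choice are placeholders, and whether a benchmark actually routes execution through the sandbox depends on its adapter (e.g. humaneval or live_code_bench in this release).

# Illustrative sketch only; field names mirror the new CLI flags above.
from evalscope import TaskConfig, run_task

task = TaskConfig(
    model='qwen2.5-7b-instruct',   # placeholder model id
    datasets=['humaneval'],        # a code benchmark that can use the sandbox
    use_sandbox=True,              # --use-sandbox
    sandbox_type='docker',         # --sandbox-type
    sandbox_manager_config={},     # --sandbox-manager-config; empty -> local manager
)
run_task(task)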
evalscope/benchmarks/ai2d/__init__.py ADDED
File without changes
evalscope/benchmarks/ai2d/ai2d_adapter.py ADDED
@@ -0,0 +1,53 @@
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+from evalscope.utils.multi_choices import MultipleChoiceTemplate, parse_answers, prompt
+
+logger = get_logger()
+
+SUBSET_LIST = ['default']
+
+MULT_CHOICE_PROMPT = MultipleChoiceTemplate.SINGLE_ANSWER_COT
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='ai2d',
+        pretty_name='AI2D',
+        tags=[Tags.MULTI_MODAL, Tags.KNOWLEDGE, Tags.QA],
+        description='A Diagram Is Worth A Dozen Images',
+        dataset_id='lmms-lab/ai2d',
+        subset_list=SUBSET_LIST,
+        metric_list=['acc'],
+        eval_split='test',
+        prompt_template=MULT_CHOICE_PROMPT,
+    )
+)
+class Ai2dAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        answers_list: list[str] = record['options']
+        input_text = prompt(question=record['question'], choices=answers_list, template=MULT_CHOICE_PROMPT)
+        content_list: list[Content] = [ContentText(text=input_text)]
+        image = record.get('image')
+        if image:
+            image_base64 = bytes_to_base64(image['bytes'], format='png', add_header=True)
+            content_list.append(ContentImage(image=image_base64))
+
+        label_answer = chr(int(record['answer']) + ord('A'))
+
+        return Sample(input=[ChatMessageUser(content=content_list)], choices=answers_list, target=label_answer)
+
+    def extract_answer(self, prediction: str, task_state: TaskState) -> str:
+        answers = parse_answers(task_state)
+        return ''.join(sorted(list(answers)))
evalscope/benchmarks/amc/__init__.py ADDED
File without changes
evalscope/benchmarks/amc/amc_adapter.py ADDED
@@ -0,0 +1,46 @@
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='amc',
+        pretty_name='AMC',
+        tags=[Tags.MATH, Tags.REASONING],
+        description=
+        'AMC (American Mathematics Competitions) is a series of mathematics competitions for high school students.',
+        dataset_id='evalscope/amc_22-24',
+        subset_list=['amc22', 'amc23', 'amc24'],
+        metric_list=[{
+            'acc': {
+                'numeric': True
+            }
+        }],
+        prompt_template='{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
+    )
+)
+class AMCAdapter(DefaultDataAdapter):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # Use split as subset
+        self.split_as_subset = True
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        return Sample(
+            input=record['problem'],
+            target=record['answer'],
+            metadata={
+                'year': record['year'],
+                'url': record['url'],
+                'solution': record.get('solution', '')
+            },
+        )
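Once registered, the new benchmark is addressable by its name. A minimal smoke-test sketch, assuming the top-level TaskConfig/run_task entry points and using placeholder values for the model and sample limit:

# Illustrative only: running the newly registered 'amc' benchmark.
from evalscope import TaskConfig, run_task

run_task(TaskConfig(
    model='qwen2.5-math-7b-instruct',  # placeholder model id
    datasets=['amc'],                  # name registered by the adapter above
    limit=5,                           # small trial run
))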
evalscope/benchmarks/bbh/bbh_adapter.py CHANGED
@@ -141,35 +141,61 @@ class BBHAdapter(DefaultDataAdapter):
     @classmethod
     def _extract_mc_answer(cls, ans: str) -> str:
         """
-        Extract the answer from the model output for Multiple choice task.
+        Extract normalized answer for BBH multiple-choice tasks.
+        Handles formats like:
+        - "answer is (A)"
+        - "The answer is A."
+        - Extra text after answer.
+        Always uses the *last* occurrence of "answer is".
         """
-        ans_line = ans.split('answer is ')
-        if len(ans_line) != 1:
-            ans = ans_line[1].strip()
-        match = re.search(r'\(([A-Z])\)*', ans)
+        ans = ans.strip()
+
+        parts = ans.split('So the answer is ')
+        if len(parts) > 1:
+            ans = parts[-1].strip()
+            ans = ans.split('\n')[0].strip()
+
+        # Remove trailing period
+        if ans.endswith('.'):
+            ans = ans[:-1].strip()
+
+        # Capture uppercase letter inside parentheses (A) (B) ...
+        match = re.search(r'\(([A-Z])\)', ans)
         if match:
             return match.group(1)
-        match = re.search(r'([A-Z])', ans)
+
+        # Capture single uppercase letter
+        match = re.search(r'\b([A-Z])\b', ans)
         if match:
             return match.group(1)
+
         return ans

     @classmethod
     def _extract_ff_answer(cls, ans: str):
         """
-        Extract the answer from the model output for Free-form task.
+        Extract the normalized answer for BBH free-form tasks.
+        Handles patterns like:
+        - "answer is XXX."
+        - "The answer is **valid**."
+        - Extra trailing dots / line breaks.
+        - Bold-marked answers (**xxx**).
+        Always uses the *last* occurrence of "answer is".
         """
-        pattern = r'answer is\s+(.*?)\.'
+        ans = ans.strip()

-        match = re.search(pattern, ans)
-        if match:
-            res = match.group(1)
-            return res
+        parts = ans.split('So the answer is ')
+        if len(parts) > 1:
+            ans = parts[-1].strip()
+            ans = ans.split('\n')[0].strip()

-        ans_line = ans.split('answer is ')
-        if len(ans_line) != 1:
-            ans = ans_line[1].strip()
-        ans = ans.split('\n')[0]
+        # Remove trailing period
         if ans.endswith('.'):
-            ans = ans[:-1]
+            ans = ans[:-1].strip()
+
+        # If answer is in bold (**xxx**), prefer the content inside
+        match = re.search(r'\*\*(.*?)\*\*', ans)
+        if match:
+            ans = match.group(1).strip()
+
         return ans
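For reference, a quick illustration of what the reworked extractors return for typical BBH outputs; the expected values are inferred by tracing the code in the hunk above.

# Illustrative only: exercising the two classmethods changed above.
from evalscope.benchmarks.bbh.bbh_adapter import BBHAdapter

print(BBHAdapter._extract_mc_answer('Let us think step by step. So the answer is (B).'))  # -> 'B'
print(BBHAdapter._extract_ff_answer('So the answer is **valid**.'))                       # -> 'valid'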