evalscope-1.0.1-py3-none-any.whl → evalscope-1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of evalscope has been flagged as a potentially problematic release.

Files changed (87)
  1. evalscope/api/benchmark/adapters/default_data_adapter.py +6 -4
  2. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  3. evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
  4. evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
  5. evalscope/api/benchmark/benchmark.py +27 -2
  6. evalscope/api/benchmark/meta.py +3 -0
  7. evalscope/api/evaluator/evaluator.py +5 -0
  8. evalscope/api/evaluator/state.py +5 -0
  9. evalscope/api/messages/chat_message.py +6 -1
  10. evalscope/api/mixin/__init__.py +1 -0
  11. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  12. evalscope/api/mixin/sandbox_mixin.py +204 -0
  13. evalscope/api/model/generate_config.py +0 -3
  14. evalscope/api/model/model.py +1 -1
  15. evalscope/api/tool/tool_info.py +1 -1
  16. evalscope/arguments.py +6 -0
  17. evalscope/benchmarks/ai2d/__init__.py +0 -0
  18. evalscope/benchmarks/ai2d/ai2d_adapter.py +53 -0
  19. evalscope/benchmarks/amc/__init__.py +0 -0
  20. evalscope/benchmarks/amc/amc_adapter.py +46 -0
  21. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  22. evalscope/benchmarks/bfcl/bfcl_adapter.py +141 -2
  23. evalscope/benchmarks/bfcl/generation.py +7 -7
  24. evalscope/benchmarks/drop/drop_adapter.py +1 -1
  25. evalscope/benchmarks/healthbench/__init__.py +0 -0
  26. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  27. evalscope/benchmarks/healthbench/utils.py +102 -0
  28. evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
  29. evalscope/benchmarks/humaneval/utils.py +235 -0
  30. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  31. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
  32. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  33. evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
  34. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  35. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
  36. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  37. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  38. evalscope/benchmarks/mm_star/__init__.py +0 -0
  39. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  40. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
  41. evalscope/benchmarks/multi_if/__init__.py +0 -0
  42. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  43. evalscope/benchmarks/multi_if/metrics.py +120 -0
  44. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  45. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
  46. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  47. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  48. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  49. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  50. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  51. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  52. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  53. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
  54. evalscope/config.py +24 -1
  55. evalscope/constants.py +3 -0
  56. evalscope/evaluator/evaluator.py +25 -7
  57. evalscope/metrics/metric.py +27 -2
  58. evalscope/models/model_apis.py +10 -8
  59. evalscope/models/utils/openai.py +1 -2
  60. evalscope/perf/arguments.py +2 -0
  61. evalscope/perf/plugin/api/base.py +2 -2
  62. evalscope/perf/plugin/api/default_api.py +7 -7
  63. evalscope/perf/plugin/api/openai_api.py +83 -19
  64. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  65. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  66. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  67. evalscope/perf/utils/benchmark_util.py +1 -2
  68. evalscope/report/combinator.py +0 -25
  69. evalscope/report/report.py +8 -4
  70. evalscope/run.py +1 -1
  71. evalscope/utils/function_utils.py +41 -0
  72. evalscope/utils/import_utils.py +63 -13
  73. evalscope/utils/io_utils.py +19 -11
  74. evalscope/utils/json_schema.py +23 -2
  75. evalscope/utils/logger.py +19 -0
  76. evalscope/utils/model_utils.py +1 -1
  77. evalscope/version.py +2 -2
  78. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/METADATA +6 -10
  79. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/RECORD +87 -59
  80. tests/benchmark/test_eval.py +51 -7
  81. tests/benchmark/test_sandbox.py +81 -0
  82. tests/benchmark/test_vlm.py +60 -3
  83. tests/perf/test_perf.py +40 -12
  84. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/LICENSE +0 -0
  85. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/WHEEL +0 -0
  86. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/entry_points.txt +0 -0
  87. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/top_level.txt +0 -0
evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py ADDED
@@ -0,0 +1,64 @@
+import re
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+SUBSET_LIST = ['default']
+
+OPEN_PROMPT = (
+    'Read the picture and solve the following problem step by step.'
+    'The last line of your response should be of the form'
+    ' "ANSWER: $ANSWER" (without quotes) where $ANSWER is the answer to the problem.\n\n'
+    '{question}\n\n'
+    'Remember to put your answer on its own line at the end in the form'
+    ' "ANSWER: $ANSWER" (without quotes) where $ANSWER is the answer to the problem,'
+    ' and you do not need to use a \\boxed command.'
+)
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='real_world_qa',
+        pretty_name='RealWorldQA',
+        tags=[Tags.MULTI_MODAL, Tags.KNOWLEDGE, Tags.QA],
+        description=
+        'RealWorldQA is a benchmark designed to evaluate the real-world spatial understanding capabilities of multimodal AI models, contributed by XAI. It assesses how well these models comprehend physical environments. The benchmark consists of 700+ images, each accompanied by a question and a verifiable answer. These images are drawn from real-world scenarios, including those captured from vehicles. The goal is to advance AI models\' understanding of our physical world.', # noqa: E501
+        dataset_id='lmms-lab/RealWorldQA',
+        subset_list=SUBSET_LIST,
+        metric_list=['acc'],
+        eval_split='test',
+        prompt_template=OPEN_PROMPT,
+    )
+)
+class RealWorldQAAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        content_list: list[Content] = [ContentText(text=OPEN_PROMPT.format(question=record['question']))]
+        image = record.get('image')
+        if image:
+            image_base64 = bytes_to_base64(image['bytes'], format='webp', add_header=True)
+            content_list.append(ContentImage(image=image_base64))
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            target=record['answer'],
+            metadata={'image_path': record['image_path']}
+        )
+
+    def extract_answer(self, prediction: str, task_state: TaskState) -> str:
+        pattern = r'ANSWER:\s*(.*)'
+        match = re.search(pattern, prediction)
+        if match:
+            return match.group(1).strip()
+        return ''
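
For orientation, here is a minimal sketch of how the new RealWorldQA adapter could be exercised once 1.0.2 is installed. It assumes evalscope's usual TaskConfig/run_task entry points; the model name and endpoint are placeholders, so treat it as illustrative rather than as part of the diff.

from evalscope import TaskConfig, run_task

# Hypothetical smoke test for the 'real_world_qa' registry name defined above.
task_cfg = TaskConfig(
    model='qwen2.5-vl-7b-instruct',       # placeholder vision-language model
    api_url='http://127.0.0.1:8801/v1',   # placeholder OpenAI-compatible endpoint
    datasets=['real_world_qa'],
    limit=5,                              # keep the first run small
)
run_task(task_cfg=task_cfg)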
evalscope/benchmarks/tau_bench/tau_bench_adapter.py CHANGED
@@ -47,7 +47,12 @@ class TauBenchAdapter(DefaultDataAdapter):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
-        check_import('tau_bench', package='git+https://github.com/sierra-research/tau-bench', raise_error=True)
+        check_import(
+            'tau_bench',
+            package='git+https://github.com/sierra-research/tau-bench',
+            raise_error=True,
+            feature_name=self.pretty_name
+        )
 
         # setup user model args
         self.user_model = self.extra_params.get('user_model', 'qwen-plus')
evalscope/config.py CHANGED
@@ -18,6 +18,7 @@ from evalscope.constants import (
 )
 from evalscope.utils.argument_utils import BaseArgument, parse_int_or_float
 from evalscope.utils.deprecation_utils import deprecated_warning
+from evalscope.utils.import_utils import check_import
 from evalscope.utils.io_utils import dict_to_yaml, gen_hash, safe_filename
 from evalscope.utils.logger import get_logger
 
@@ -124,6 +125,19 @@ class TaskConfig(BaseArgument):
     analysis_report: bool = False
     """Whether to generate detailed analysis reports after evaluation."""
 
+    # Sandbox configuration arguments
+    use_sandbox: bool = False
+    """Whether to execute code in a sandboxed environment."""
+
+    sandbox_type: Optional[str] = 'docker'
+    """Type of sandbox environment for code execution (e.g., docker). Default is 'docker'."""
+
+    sandbox_manager_config: Optional[Dict] = field(default_factory=dict)
+    """Configuration for the sandbox manager. Default is local manager. If url is provided, it will use remote manager."""
+
+    sandbox_config: Optional[Dict] = field(default_factory=dict)
+    """Configuration for sandboxed code execution environments."""
+
     def __post_init__(self):
         self.__init_model_and_id()
 
@@ -132,6 +146,7 @@ class TaskConfig(BaseArgument):
         # Set default generation_config and model_args
         self.__init_default_generation_config()
         self.__init_default_model_args()
+        self.__init_default_sandbox_config()
 
     def __init_model_and_id(self):
         # Set model to DummyCustomModel if not provided
@@ -223,6 +238,14 @@ class TaskConfig(BaseArgument):
             'precision': 'torch.float16',
         }
 
+    def __init_default_sandbox_config(self):
+        if not self.use_sandbox:
+            return
+        check_import('ms_enclave', 'ms_enclave[docker]', raise_error=True)
+
+        if not self.sandbox_type:
+            self.sandbox_type = 'docker'
+
     def update(self, other: Union['TaskConfig', dict]):
         if isinstance(other, TaskConfig):
             other = other.to_dict()
@@ -238,7 +261,7 @@ class TaskConfig(BaseArgument):
             logger.warning(f'Failed to dump overall task config: {e}')
 
     def to_dict(self):
-        result = copy.deepcopy(self.__dict__)
+        result = copy.copy(self.__dict__)
         del result['api_key'] # Do not expose api_key in the config
 
         if isinstance(self.model, (Model, ModelAPI)):
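
The new fields slot into TaskConfig alongside the existing options. Below is a hedged sketch of enabling them, assuming the Docker-backed ms_enclave manager and a code benchmark shipped in this release; the model name is a placeholder and the exact shape of sandbox_config is not pinned down by this diff.

from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='qwen2.5-coder-7b-instruct',  # placeholder model
    datasets=['humaneval'],             # code-execution benchmark
    use_sandbox=True,                   # triggers the ms_enclave[docker] import check
    sandbox_type='docker',              # also the default when left unset
    sandbox_manager_config={},          # empty -> local manager; a 'url' key selects a remote one
    sandbox_config={},                  # per-sandbox execution settings (shape assumed)
)
run_task(task_cfg=task_cfg)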
evalscope/constants.py CHANGED
@@ -15,6 +15,7 @@ DEFAULT_ROOT_CACHE_DIR = DEFAULT_DATASET_CACHE_DIR # compatible with old versio
 DEFAULT_EVALSCOPE_CACHE_DIR = os.path.expanduser(
     os.getenv('EVALSCOPE_CACHE', '~/.cache/evalscope')
 ) # ~/.cache/evalscope
+IS_BUILD_DOC = os.getenv('BUILD_DOC', '0') == '1' # To avoid some heavy dependencies when building doc
 
 
 class HubType:
@@ -130,6 +131,8 @@ class Tags:
     TEXT_TO_IMAGE = 'TextToImage'
     IMAGE_EDITING = 'ImageEditing'
     MULTI_MODAL = 'MultiModal'
+    MULTI_LINGUAL = 'MultiLingual'
+    MULTI_TURN = 'MultiTurn'
 
 
 class FileConstants:
evalscope/evaluator/evaluator.py CHANGED
@@ -8,8 +8,9 @@ and report generation.
 """
 
 import os
+import traceback
 from collections import defaultdict
-from concurrent.futures import ThreadPoolExecutor, as_completed
+from concurrent.futures import ThreadPoolExecutor, TimeoutError, as_completed
 from tqdm import tqdm
 from typing import TYPE_CHECKING, Dict, List, Tuple, Union
 
@@ -17,6 +18,7 @@ from evalscope.api.dataset import Dataset, DatasetDict, Sample
 from evalscope.api.evaluator import CacheManager, Evaluator, TaskState
 from evalscope.api.metric import AggScore, SampleScore
 from evalscope.report import Report, gen_table
+from evalscope.utils.logger import get_logger
 
 if TYPE_CHECKING:
     from evalscope.api.benchmark import DataAdapter
@@ -24,8 +26,6 @@ if TYPE_CHECKING:
     from evalscope.config import TaskConfig
     from evalscope.utils.io_utils import OutputsStructure
 
-from evalscope.utils.logger import get_logger
-
 logger = get_logger()
 
 
@@ -104,6 +104,9 @@ class DefaultEvaluator(Evaluator):
 
        # Generate the report based on aggregated scores
        report = self.get_report(agg_score_dict)
+
+        # Finalize the evaluation process
+        self.finalize()
        return report
 
    def evaluate_subset(self, subset: str, dataset: Dataset) -> List[AggScore]:
@@ -186,7 +189,10 @@
                logger.debug(f'Model result: \n{model_result.pretty_print()}')
 
            except Exception as exc:
-                logger.error(f'{sample.model_dump_json(indent=2)} prediction failed: due to {exc}')
+                tb_str = traceback.format_exc()
+                logger.error(
+                    f'{sample.model_dump_json(indent=2)} prediction failed: due to {exc}\nTraceback:\n{tb_str}'
+                )
                if self.task_config.ignore_errors:
                    logger.warning('Error ignored, continuing with next sample.')
                else:
@@ -253,7 +259,13 @@
            for future in as_completed(future_to_task_state):
                task_state = future_to_task_state[future]
                try:
-                    sample_score = future.result()
+                    try:
+                        sample_score = future.result()
+                    except TimeoutError:
+                        logger.warning(
+                            f'Timeout when reviewing sample {task_state.sample_id}, setting score to zero.'
+                        )
+                        sample_score = SampleScore(sample_id=task_state.sample_id, scores={})
                    sample_score_list.append(sample_score)
 
                    # Save the review result to cache for future use
@@ -266,7 +278,10 @@
                    logger.debug(f'Review result: \n{review_result.pretty_print()}')
 
                except Exception as exc:
-                    logger.error(f'Error when review sample {task_state.sample_id}: due to {exc}')
+                    tb_str = traceback.format_exc()
+                    logger.error(
+                        f'Error when review sample {task_state.sample_id}: due to {exc}\nTraceback:\n{tb_str}'
+                    )
                    if self.task_config.ignore_errors:
                        logger.warning('Error ignored, continuing with next sample.')
                    else:
@@ -319,7 +334,7 @@
 
        # Generate and display a summary table of results
        try:
-            report_table = gen_table(report_list=[report], add_overall_metric=True)
+            report_table = gen_table(report_list=[report], add_overall_metric=self.benchmark.add_overall_metric)
            logger.info(f'\n{self.benchmark_name} report table:'
                        f'\n{report_table} \n')
        except Exception:
@@ -337,3 +352,6 @@
        report.to_json(report_file)
        logger.info(f'Dump report to: {report_file} \n')
        return report
+
+    def finalize(self, *args, **kwargs):
+        self.benchmark.finalize(*args, **kwargs)
evalscope/metrics/metric.py CHANGED
@@ -6,11 +6,19 @@ from evalscope.api.registry import register_aggregation, register_metric
 from .metrics import mean
 
 
+def normalize_text(text: str) -> str:
+    """Normalize text by lowering case and stripping whitespace."""
+    return text.strip().lower()
+
+
 @register_metric(name='exact_match')
 class ExactMatch(Metric):
 
     def apply(self, predictions, references):
-        return [float(prediction == reference) for prediction, reference in zip(predictions, references)]
+        return [
+            float(normalize_text(prediction) == normalize_text(reference))
+            for prediction, reference in zip(predictions, references)
+        ]
 
 
 @register_metric(name='acc')
@@ -202,6 +210,9 @@ class Mean(Aggregator):
 
     name = 'mean'
 
+    def agg_func(self, values: List[float]) -> float:
+        return mean(values)
+
     def __call__(self, scores: List[SampleScore]) -> List[AggScore]:
         """Aggregate scores by computing the mean for each metric.
 
@@ -230,7 +241,7 @@
            if values: # Only process non-empty value lists
                aggregated_scores.append(
                    AggScore(
-                        score=mean(values),
+                        score=self.agg_func(values),
                        metric_name=metric_name,
                        aggregation_name=self.name,
                        num=len(values),
@@ -241,6 +252,20 @@
        return aggregated_scores
 
 
+@register_aggregation(name='clipped_mean')
+class ClippedMean(Mean):
+
+    name = 'clipped_mean'
+
+    def __init__(self, clip_min: float = 0.0, clip_max: float = 1.0):
+        self.clip_min = clip_min
+        self.clip_max = clip_max
+
+    def agg_func(self, values: List[float]) -> float:
+        clipped_values = min(max(mean(values), self.clip_min), self.clip_max)
+        return clipped_values
+
+
 @register_aggregation(name='pass_at_k')
 class PassAtK(Aggregator):
 
evalscope/models/model_apis.py CHANGED
@@ -28,7 +28,7 @@ def server() -> type[ModelAPI]:
 
 @register_model_api(name='llm_ckpt')
 def llm_ckpt() -> type[ModelAPI]:
-    check_import('torch', package='torch', raise_error=True)
+    check_import('torch', package='torch', raise_error=True, feature_name='llm_ckpt')
 
     from .modelscope import ModelScopeAPI
 
@@ -38,7 +38,7 @@ def llm_ckpt() -> type[ModelAPI]:
 @register_model_api(name='checkpoint')
 @deprecated(since='1.0.0', remove_in='1.1.0', alternative='llm_ckpt')
 def checkpoint() -> type[ModelAPI]:
-    check_import('torch', package='torch', raise_error=True)
+    check_import('torch', package='torch', raise_error=True, feature_name='llm_ckpt')
 
     from .modelscope import ModelScopeAPI
 
@@ -47,9 +47,10 @@
 
 @register_model_api(name='text2image')
 def text2image() -> type[ModelAPI]:
-    check_import('torch', package='evalscope[aigc]', raise_error=True)
-    check_import('torchvision', package='evalscope[aigc]', raise_error=True)
-    check_import('diffusers', package='evalscope[aigc]', raise_error=True)
+    check_import(['torch', 'torchvision', 'diffusers'],
+                 package='evalscope[aigc]',
+                 raise_error=True,
+                 feature_name='text2image')
 
     from .text2image_model import Text2ImageAPI
 
@@ -58,9 +59,10 @@
 
 @register_model_api(name='image_editing')
 def image_editing() -> type[ModelAPI]:
-    check_import('torch', package='evalscope[aigc]', raise_error=True)
-    check_import('torchvision', package='evalscope[aigc]', raise_error=True)
-    check_import('diffusers', package='evalscope[aigc]', raise_error=True)
+    check_import(['torch', 'torchvision', 'diffusers'],
+                 package='evalscope[aigc]',
+                 raise_error=True,
+                 feature_name='image_editing')
 
     from .image_edit_model import ImageEditAPI
 
evalscope/models/utils/openai.py CHANGED
@@ -104,10 +104,9 @@ def openai_chat_completion_part(content: Content) -> ChatCompletionContentPartPa
         )
     elif content.type == 'audio':
         audio_data_uri = file_as_data_uri(content.audio)
-        audio_data = audio_data_uri.split('base64,')[1]
 
         return ChatCompletionContentPartInputAudioParam(
-            type='input_audio', input_audio=dict(data=audio_data, format=content.format)
+            type='input_audio', input_audio=dict(data=audio_data_uri, format=content.format)
         )
 
     else:
evalscope/perf/arguments.py CHANGED
@@ -55,6 +55,7 @@ class Arguments(BaseArgument):
     image_height: int = 224 # Height of the image for random VL dataset
     image_format: str = 'RGB' # Image format for random VL dataset
     image_num: int = 1 # Number of images for random VL dataset
+    image_patch_size: int = 28 # Patch size for image tokenizer, only for local image token calculation
 
     # Dataset settings
     dataset: str = 'openqa' # Dataset type (default: 'line_by_line')
@@ -171,6 +172,7 @@ def add_argument(parser: argparse.ArgumentParser):
     parser.add_argument('--image-height', type=int, default=224, help='Height of the image for random VL dataset')
     parser.add_argument('--image-format', type=str, default='RGB', help='Image format for random VL dataset')
     parser.add_argument('--image-num', type=int, default=1, help='Number of images for random VL dataset')
+    parser.add_argument('--image-patch-size', type=int, default=28, help='Patch size for image tokenizer, only for local image token calculation') # noqa: E501
 
     # Output settings
     parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs')
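
The new knob feeds the local image-token estimate used by the OpenAI perf plugin when a response carries no usage block. A rough sketch of a perf configuration that would exercise it; url, model, and dataset are placeholders, and any field not shown in this hunk is assumed from the existing Arguments class.

from evalscope.perf.arguments import Arguments

args = Arguments(
    url='http://127.0.0.1:8801/v1/chat/completions',  # placeholder endpoint
    model='qwen2.5-vl-7b-instruct',                   # placeholder model
    dataset='random_vl',                              # assumed dataset plugin name
    image_width=512,
    image_height=512,
    image_num=1,
    image_patch_size=28,  # new in 1.0.2; CLI equivalent: --image-patch-size
)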
evalscope/perf/plugin/api/base.py CHANGED
@@ -43,7 +43,7 @@ class ApiPluginBase:
 
     @abstractmethod
     async def process_request(self, client_session: aiohttp.ClientSession, url: str, headers: Dict,
-                              body: Dict) -> AsyncGenerator[Tuple[bool, int, str], None]:
+                              body: Dict) -> AsyncGenerator[Tuple[bool, int, Any], None]:
         """Process the HTTP request and handle the response.
 
         Args:
@@ -53,7 +53,7 @@ class ApiPluginBase:
             body: The request body
 
         Yields:
-            Tuple[bool, int, str]: (is_error, status_code, response_data)
+            Tuple[bool, int, Any]: (is_error, status_code, response_data)
         """
         raise NotImplementedError
 
evalscope/perf/plugin/api/default_api.py CHANGED
@@ -18,7 +18,7 @@ class DefaultApiPlugin(ApiPluginBase):
         super().__init__(param)
 
     async def process_request(self, client_session: aiohttp.ClientSession, url: str, headers: Dict,
-                              body: Dict) -> AsyncGenerator[Tuple[bool, int, str], None]:
+                              body: Dict) -> AsyncGenerator[Tuple[bool, int, Any], None]:
         """Process the HTTP request and handle the response.
 
         Args:
@@ -28,7 +28,7 @@ class DefaultApiPlugin(ApiPluginBase):
             body: The request body
 
         Yields:
-            Tuple[bool, int, str]: (is_error, status_code, response_data)
+            Tuple[bool, int, Any]: (is_error, status_code, response_data)
         """
         try:
             headers = {'Content-Type': 'application/json', **headers}
@@ -40,7 +40,7 @@ class DefaultApiPlugin(ApiPluginBase):
             logger.error(f'Error in process_request: {e}')
             yield (True, None, str(e))
 
-    async def _handle_stream(self, response: aiohttp.ClientResponse) -> AsyncGenerator[Tuple[bool, int, str], None]:
+    async def _handle_stream(self, response: aiohttp.ClientResponse) -> AsyncGenerator[Tuple[bool, int, Any], None]:
         """Handle streaming response from server-sent events.
 
         Args:
@@ -71,14 +71,14 @@ class DefaultApiPlugin(ApiPluginBase):
             logger.error(f'Error in _handle_stream: {e}')
             yield True, response.status, str(e)
 
-    async def _handle_response(self, response: aiohttp.ClientResponse) -> AsyncGenerator[Tuple[bool, int, str], None]:
+    async def _handle_response(self, response: aiohttp.ClientResponse) -> AsyncGenerator[Tuple[bool, int, Any], None]:
         """Handle the HTTP response based on content type and status.
 
         Args:
             response: The aiohttp response object
 
         Yields:
-            Tuple[bool, int, str]: (is_error, status_code, response_data)
+            Tuple[bool, int, Any]: (is_error, status_code, response_data)
         """
         response_status = response.status
         response_content_type = response.content_type
@@ -94,7 +94,7 @@ class DefaultApiPlugin(ApiPluginBase):
            # Handle successful response with 'application/json' content type
            elif content_type_json in response_content_type:
                content = await response.json()
-                yield (False, response_status, json.dumps(content, ensure_ascii=False))
+                yield (False, response_status, content)
            # Handle other successful responses
            else:
                content = await response.read()
@@ -102,4 +102,4 @@ class DefaultApiPlugin(ApiPluginBase):
        else:
            # error is always in JSON format
            error = await response.json()
-            yield (True, response_status, json.dumps(error, ensure_ascii=False))
+            yield (True, response_status, error)
evalscope/perf/plugin/api/openai_api.py CHANGED
@@ -1,10 +1,13 @@
 import json
+import math
 import os
+from collections import defaultdict
 from typing import Any, Dict, List, Tuple, Union
 
 from evalscope.perf.arguments import Arguments
 from evalscope.perf.plugin.api.default_api import DefaultApiPlugin
 from evalscope.perf.plugin.registry import register_api
+from evalscope.utils.io_utils import base64_to_PIL
 from evalscope.utils.logger import get_logger
 
 logger = get_logger()
@@ -113,7 +116,7 @@ class OpenaiPlugin(DefaultApiPlugin):
             return input_tokens, output_tokens
 
         # no usage information in the response, parse the response to get the tokens
-        delta_contents = {}
+        delta_contents = defaultdict(list)
         for response in responses:
             if 'object' in response:
                 self.__process_response_object(response, delta_contents)
@@ -123,41 +126,46 @@
         input_tokens, output_tokens = self.__calculate_tokens_from_content(request, delta_contents)
         return input_tokens, output_tokens
 
-    def __process_response_object(self, js, delta_contents):
-        if js['object'] == 'chat.completion':
-            for choice in js['choices']:
+    def __process_response_object(self, response, delta_contents):
+        if not response.get('choices'):
+            return
+        if response['object'] == 'chat.completion':
+            for choice in response['choices']:
                 delta_contents[choice['index']] = [choice['message']['content']]
-        elif js['object'] == 'text_completion':
-            for choice in js['choices']:
-                delta_contents[choice['index']] = [choice['text']]
-        elif js['object'] == 'chat.completion.chunk':
-            for choice in js.get('choices', []):
+        elif response['object'] == 'text_completion':
+            for choice in response['choices']:
+                if 'text' in choice and 'index' in choice:
+                    delta_contents[choice['index']].append(choice['text'])
+        elif response['object'] == 'chat.completion.chunk':
+            for choice in response['choices']:
                 if 'delta' in choice and 'index' in choice:
                     delta = choice['delta']
                     idx = choice['index']
                     if 'content' in delta:
-                        delta_content = delta['content']
-                        delta_contents.setdefault(idx, []).append(delta_content)
+                        delta_contents[idx].append(delta['content'])
 
-    def __process_no_object(self, js, delta_contents):
+    def __process_no_object(self, response, delta_contents):
         # assume the response is a single choice
-        for choice in js['choices']:
+        if not response.get('choices'):
+            return
+        for choice in response['choices']:
            if 'delta' in choice:
                delta = choice['delta']
                idx = choice['index']
                if 'content' in delta:
-                    delta_content = delta['content']
-                    delta_contents.setdefault(idx, []).append(delta_content)
+                    delta_contents[idx].append(delta['content'])
            else:
                delta_contents[choice['index']] = [choice['message']['content']]
 
-    def __calculate_tokens_from_content(self, request, delta_contents):
+    def __calculate_tokens_from_content(self, request, content):
        input_tokens = output_tokens = 0
        if self.tokenizer is not None:
-            for idx, choice_contents in delta_contents.items():
+            # Calculate input tokens
+            input_tokens += self._count_input_tokens(request)
+            for idx, choice_contents in content.items():
                full_response_content = ''.join(choice_contents)
-                input_tokens += len(self.tokenizer.encode(request['messages'][0]['content']))
-                output_tokens += len(self.tokenizer.encode(full_response_content))
+                # Calculate output tokens
+                output_tokens += self._count_output_tokens(full_response_content)
        else:
            raise ValueError(
                'Error: Unable to retrieve usage information\n\n'
@@ -171,3 +179,59 @@
                'please open an issue on our GitHub repository https://github.com/modelscope/evalscope .'
            )
        return input_tokens, output_tokens
+
+    def _count_input_tokens(self, request: Dict) -> int:
+        """Count the number of input tokens in the request.
+
+        This method handles different types of requests and calculates tokens for:
+        - Text content in messages or prompts
+        - Images in multimodal messages (converted to patch tokens)
+
+        Args:
+            request (Dict): The request dictionary containing either 'messages' for chat
+                completion or 'prompt' for text completion.
+
+        Returns:
+            int: The total number of input tokens including text and image tokens.
+        """
+        input_tokens = 0
+        if 'messages' in request:
+            input_content = self.tokenizer.apply_chat_template(
+                request['messages'], tokenize=True, add_generation_prompt=True
+            )
+            input_tokens += len(input_content)
+            # handle image tokens if any
+            for message in request['messages']:
+                content = message.get('content', '')
+                if isinstance(content, str):
+                    continue
+                for cont in content:
+                    if cont['type'] == 'image_url':
+                        try:
+                            # assuming image_url is base64 string
+                            image_base64 = cont['image_url']['url']
+                            image = base64_to_PIL(image_base64)
+                            # Use math.ceil for more accurate token count when image dimensions
+                            # aren't perfectly divisible by patch size
+                            n_patches = (
+                                math.ceil(image.height / self.param.image_patch_size)
+                                * math.ceil(image.width / self.param.image_patch_size)
+                            )
+                            input_tokens += n_patches
+                        except Exception as e:
+                            logger.warning(f'Failed to process image for token counting: {e}')
+                            # Continue processing other content without failing
+        elif 'prompt' in request:
+            input_tokens += len(self.tokenizer.encode(request['prompt'], add_special_tokens=False))
+        return input_tokens
+
+    def _count_output_tokens(self, response: str) -> int:
+        """Count the number of output tokens in the response. Only string response is supported.
+
+        Args:
+            response (str): The API response text.
+
+        Returns:
+            int: The number of output tokens.
+        """
+        return len(self.tokenizer.encode(response, add_special_tokens=False))
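
The image contribution above is a pure patch count. As a quick sanity check of the arithmetic (standalone illustration, not evalscope code): a 512x512 image with the default patch size of 28 yields ceil(512/28) = 19 patches per side, i.e. 361 estimated image tokens.

import math

def estimate_image_tokens(width: int, height: int, patch_size: int = 28) -> int:
    # Mirrors the ceil-based patch count used in _count_input_tokens above.
    return math.ceil(height / patch_size) * math.ceil(width / patch_size)

assert estimate_image_tokens(512, 512) == 361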
evalscope/perf/plugin/datasets/flickr8k.py CHANGED
@@ -22,7 +22,7 @@ class FlickrDatasetPlugin(DatasetPluginBase):
         for item in dataset:
             pil_image = item['jpg']
             text = item['txt']
-            base64_image = PIL_to_base64(pil_image)
+            base64_image = PIL_to_base64(pil_image, add_header=True)
 
-            message = self.create_message(text=text, image_urls=f'data:image/jpeg;base64,{base64_image}')
+            message = self.create_message(text=text, image_urls=base64_image)
             yield [message]
evalscope/perf/plugin/datasets/kontext_bench.py CHANGED
@@ -22,7 +22,7 @@ class KontextDatasetPlugin(DatasetPluginBase):
         for item in dataset:
             pil_image = item['image']
             text = item['instruction']
-            base64_image = PIL_to_base64(pil_image)
+            base64_image = PIL_to_base64(pil_image, add_header=True)
 
-            message = self.create_message(text=text, image_urls=f'data:image/jpeg;base64,{base64_image}')
+            message = self.create_message(text=text, image_urls=base64_image)
             yield [message]
evalscope/perf/plugin/datasets/random_vl_dataset.py CHANGED
@@ -31,7 +31,7 @@ class RandomVLDatasetPlugin(RandomDatasetPlugin):
         # Generate random images based on image_num
         images_b64 = []
         for _ in range(self.image_num):
-            images_b64.append(f'data:image/png;base64,{self._generate_random_image_b64()}')
+            images_b64.append(self._generate_random_image_b64())
 
         message = self.create_message(text=prompt, image_urls=images_b64)
         yield [message]
@@ -77,4 +77,4 @@ class RandomVLDatasetPlugin(RandomDatasetPlugin):
             draw.line(coords, fill=shape_color, width=random.randint(1, 5))
 
         # Convert to base64
-        return PIL_to_base64(image, format='PNG')
+        return PIL_to_base64(image, format='PNG', add_header=True)
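
All three dataset plugins now let the helper emit a complete data URI instead of hand-building the 'data:image/...;base64,' prefix at each call site. A rough sketch of the behaviour these call sites imply (the real PIL_to_base64 lives in evalscope/utils/io_utils.py, which this release also touches, and may differ in detail):

import base64
from io import BytesIO

from PIL import Image

def pil_to_base64_sketch(image: Image.Image, format: str = 'PNG', add_header: bool = False) -> str:
    buffer = BytesIO()
    image.save(buffer, format=format)
    encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
    if add_header:
        # add_header=True returns a ready-to-use data URI
        return f'data:image/{format.lower()};base64,{encoded}'
    return encoded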
evalscope/perf/utils/benchmark_util.py CHANGED
@@ -44,8 +44,7 @@ class BenchmarkData:
         api_plugin.parse_responses(self.response_messages, request=self.request)
 
     def update_gpu_usage(self):
-        if check_import('torch'):
-
+        if check_import('torch', raise_warning=False):
             import torch
             total_memory = 0
             for i in range(torch.cuda.device_count()):