evalscope 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of evalscope has been flagged as possibly problematic.

Files changed (155)
  1. evalscope/api/benchmark/adapters/default_data_adapter.py +18 -4
  2. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  3. evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
  4. evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
  5. evalscope/api/benchmark/benchmark.py +27 -2
  6. evalscope/api/benchmark/meta.py +3 -0
  7. evalscope/api/evaluator/evaluator.py +5 -0
  8. evalscope/api/evaluator/state.py +5 -0
  9. evalscope/api/messages/chat_message.py +6 -1
  10. evalscope/api/mixin/__init__.py +1 -0
  11. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  12. evalscope/api/mixin/sandbox_mixin.py +204 -0
  13. evalscope/api/model/generate_config.py +0 -3
  14. evalscope/api/model/model.py +1 -1
  15. evalscope/api/tool/tool_info.py +1 -1
  16. evalscope/app/ui/multi_model.py +6 -1
  17. evalscope/app/ui/single_model.py +8 -2
  18. evalscope/app/utils/data_utils.py +3 -2
  19. evalscope/app/utils/visualization.py +2 -2
  20. evalscope/arguments.py +6 -0
  21. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  22. evalscope/benchmarks/amc/__init__.py +0 -0
  23. evalscope/benchmarks/amc/amc_adapter.py +46 -0
  24. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  25. evalscope/benchmarks/bfcl/bfcl_adapter.py +106 -2
  26. evalscope/benchmarks/bfcl/generation.py +7 -7
  27. evalscope/benchmarks/blink/__init__.py +0 -0
  28. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  29. evalscope/benchmarks/chartqa/__init__.py +0 -0
  30. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  31. evalscope/benchmarks/chartqa/utils.py +38 -0
  32. evalscope/benchmarks/docvqa/__init__.py +0 -0
  33. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  34. evalscope/benchmarks/drop/drop_adapter.py +1 -1
  35. evalscope/benchmarks/general_arena/utils.py +2 -1
  36. evalscope/benchmarks/healthbench/__init__.py +0 -0
  37. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  38. evalscope/benchmarks/healthbench/utils.py +102 -0
  39. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  40. evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
  41. evalscope/benchmarks/humaneval/utils.py +235 -0
  42. evalscope/benchmarks/infovqa/__init__.py +0 -0
  43. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  44. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  45. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
  46. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  47. evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
  48. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  49. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
  50. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  51. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  52. evalscope/benchmarks/mm_star/__init__.py +0 -0
  53. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  54. evalscope/benchmarks/mmmu/mmmu_adapter.py +1 -1
  55. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
  56. evalscope/benchmarks/multi_if/__init__.py +0 -0
  57. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  58. evalscope/benchmarks/multi_if/metrics.py +120 -0
  59. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  60. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
  61. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  62. evalscope/benchmarks/ocr_bench/ocr_bench_adapter.py +101 -0
  63. evalscope/benchmarks/ocr_bench_v2/IoUscore_metric.py +87 -0
  64. evalscope/benchmarks/ocr_bench_v2/TEDS_metric.py +963 -0
  65. evalscope/benchmarks/ocr_bench_v2/__init__.py +0 -0
  66. evalscope/benchmarks/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  67. evalscope/benchmarks/ocr_bench_v2/page_ocr_metric.py +50 -0
  68. evalscope/benchmarks/ocr_bench_v2/parallel.py +46 -0
  69. evalscope/benchmarks/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  70. evalscope/benchmarks/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  71. evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  72. evalscope/benchmarks/ocr_bench_v2/spotting_eval/script.py +481 -0
  73. evalscope/benchmarks/ocr_bench_v2/spotting_metric.py +179 -0
  74. evalscope/benchmarks/ocr_bench_v2/utils.py +432 -0
  75. evalscope/benchmarks/ocr_bench_v2/vqa_metric.py +254 -0
  76. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  77. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  78. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  79. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  80. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  81. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  82. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  83. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
  84. evalscope/config.py +24 -1
  85. evalscope/constants.py +3 -0
  86. evalscope/evaluator/evaluator.py +25 -7
  87. evalscope/metrics/metric.py +78 -2
  88. evalscope/metrics/metrics.py +16 -0
  89. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  90. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  91. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  92. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  93. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  94. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  95. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  96. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  97. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  98. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  99. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  100. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  101. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  102. evalscope/models/model_apis.py +10 -8
  103. evalscope/models/utils/openai.py +1 -2
  104. evalscope/perf/arguments.py +2 -0
  105. evalscope/perf/plugin/api/base.py +2 -2
  106. evalscope/perf/plugin/api/default_api.py +7 -7
  107. evalscope/perf/plugin/api/openai_api.py +83 -19
  108. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  109. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  110. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  111. evalscope/perf/utils/benchmark_util.py +1 -2
  112. evalscope/report/__init__.py +9 -1
  113. evalscope/report/combinator.py +45 -20
  114. evalscope/report/report.py +8 -4
  115. evalscope/run.py +1 -1
  116. evalscope/utils/function_utils.py +41 -0
  117. evalscope/utils/import_utils.py +63 -13
  118. evalscope/utils/io_utils.py +19 -11
  119. evalscope/utils/json_schema.py +25 -2
  120. evalscope/utils/logger.py +19 -0
  121. evalscope/utils/model_utils.py +1 -1
  122. evalscope/utils/multi_choices.py +16 -1
  123. evalscope/version.py +2 -2
  124. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/METADATA +10 -40
  125. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/RECORD +120 -95
  126. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/top_level.txt +0 -1
  127. tests/__init__.py +0 -1
  128. tests/benchmark/__init__.py +0 -1
  129. tests/benchmark/test_eval.py +0 -385
  130. tests/benchmark/test_image_edit.py +0 -65
  131. tests/benchmark/test_t2i.py +0 -142
  132. tests/benchmark/test_vlm.py +0 -80
  133. tests/cli/__init__.py +0 -1
  134. tests/cli/test_all.py +0 -269
  135. tests/cli/test_collection.py +0 -99
  136. tests/cli/test_custom.py +0 -268
  137. tests/cli/test_reasoning.py +0 -81
  138. tests/common.py +0 -73
  139. tests/perf/__init__.py +0 -1
  140. tests/perf/test_perf.py +0 -178
  141. tests/rag/test_clip_benchmark.py +0 -87
  142. tests/rag/test_mteb.py +0 -213
  143. tests/rag/test_ragas.py +0 -128
  144. tests/swift/__init__.py +0 -1
  145. tests/swift/test_run_swift_eval.py +0 -146
  146. tests/swift/test_run_swift_vlm_eval.py +0 -128
  147. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  148. tests/test_run_all.py +0 -12
  149. tests/utils.py +0 -13
  150. tests/vlm/__init__.py +0 -1
  151. tests/vlm/test_vlmeval.py +0 -102
  152. {tests/rag → evalscope/benchmarks/ai2d}/__init__.py +0 -0
  153. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/LICENSE +0 -0
  154. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/WHEEL +0 -0
  155. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/entry_points.txt +0 -0
@@ -4,7 +4,6 @@ from typing import Any, Dict
  
  from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
  from evalscope.api.dataset import Sample
- from evalscope.api.evaluator import TaskState
  from evalscope.api.registry import register_benchmark
  from evalscope.constants import Tags
  from evalscope.utils.logger import get_logger
@@ -0,0 +1,48 @@
+ from typing import Any, Dict
+ 
+ from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+ from evalscope.utils.logger import get_logger
+ 
+ logger = get_logger()
+ 
+ 
+ @register_benchmark(
+     BenchmarkMeta(
+         name='minerva_math',
+         pretty_name='Minerva-Math',
+         tags=[Tags.MATH, Tags.REASONING],
+         description='Minerva-math is a benchmark designed to evaluate the mathematical and quantitative '
+         'reasoning capabilities of LLMs. It consists of **272 problems** '
+         'sourced primarily from **MIT OpenCourseWare** '
+         'courses, covering advanced STEM subjects such as solid-state chemistry, astronomy, differential '
+         'equations, and special relativity at the **university and graduate level**.',
+         dataset_id='knoveleng/Minerva-Math',
+         subset_list=['default'],
+         metric_list=[{
+             'acc': {
+                 'numeric': True
+             }
+         }],
+         eval_split='train',
+         prompt_template='{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
+     )
+ )
+ class MinervaMathAdapter(DefaultDataAdapter):
+ 
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+ 
+         self._use_llm_judge = True
+ 
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         return Sample(
+             input=record['problem'],
+             target=record['solution'],
+             metadata={
+                 'type': record['type'],
+                 'idx': record['idx'],
+             },
+         )
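Since the adapter registers itself under the name 'minerva_math', it should be runnable like any other benchmark in the registry. A minimal smoke-test sketch, assuming the TaskConfig/run_task entry points from evalscope/config.py and evalscope/run.py accept datasets by registered name; the model id and sample limit below are placeholders, not values from this diff:

# Hedged sketch, not taken from the package: run the new 'minerva_math' benchmark.
from evalscope.config import TaskConfig
from evalscope.run import run_task

task_cfg = TaskConfig(
    model='Qwen/Qwen2.5-7B-Instruct',  # placeholder model id
    datasets=['minerva_math'],         # name registered by @register_benchmark above
    limit=8,                           # small limit for a quick smoke test
)
run_task(task_cfg=task_cfg)

Note that the adapter enables self._use_llm_judge, so free-form solutions are presumably graded by the LLM-judge path rather than by exact string matching against record['solution'].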
@@ -0,0 +1,99 @@
+ from typing import Any, Dict, List
+ 
+ from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter, VisionLanguageAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+ from evalscope.utils.io_utils import bytes_to_base64
+ from evalscope.utils.logger import get_logger
+ from evalscope.utils.multi_choices import MultipleChoiceTemplate, prompt
+ 
+ logger = get_logger()
+ 
+ MULT_CHOICE_PROMPT = MultipleChoiceTemplate.SINGLE_ANSWER_COT
+ 
+ 
+ @register_benchmark(
+     BenchmarkMeta(
+         name='cc_bench',
+         pretty_name='CCBench',
+         tags=[Tags.MULTI_MODAL, Tags.KNOWLEDGE, Tags.MULTIPLE_CHOICE],
+         description=
+         'CCBench is an extension of MMBench with newly design questions about Chinese traditional culture, including Calligraphy Painting, Cultural Relic, Food & Clothes, Historical Figures, Scenery & Building, Sketch Reasoning and Traditional Show.',  # noqa: E501
+         dataset_id='lmms-lab/MMBench',
+         subset_list=['cc'],
+         metric_list=['acc'],
+         eval_split='test',
+         prompt_template=MULT_CHOICE_PROMPT,
+     )
+ )
+ class CCBenchAdapter(VisionLanguageAdapter, MultiChoiceAdapter):
+ 
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+ 
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         answers_list: List[str] = [record.get('A', ''), record.get('B', ''), record.get('C', ''), record.get('D', '')]
+         input_text = prompt(question=record['question'], choices=answers_list, template=self.prompt_template)
+         content_list: List[Content] = [ContentText(text=input_text)]
+         image = record.get('image')
+         if image:
+             image_base64 = bytes_to_base64(image['bytes'], format='jpeg', add_header=True)
+             content_list.append(ContentImage(image=image_base64))
+         label_answer = record.get('answer')
+         return Sample(
+             input=[ChatMessageUser(content=content_list)],
+             choices=answers_list,
+             target=label_answer,
+             metadata={
+                 'index': record.get('index'),
+                 'category': record.get('category'),
+                 'source': record.get('source')
+             }
+         )
+ 
+ 
+ @register_benchmark(
+     BenchmarkMeta(
+         name='mm_bench',
+         pretty_name='MMBench',
+         tags=[Tags.MULTI_MODAL, Tags.KNOWLEDGE, Tags.QA],
+         description=
+         'MMBench is a comprehensive evaluation pipeline comprised of meticulously curated multimodal dataset and a novel circulareval strategy using ChatGPT. It is comprised of 20 ability dimensions defined by MMBench. It also contains chinese version with translated question.',  # noqa: E501
+         dataset_id='lmms-lab/MMBench',
+         subset_list=['cn', 'en'],
+         metric_list=['acc'],
+         eval_split='dev',
+         prompt_template=MULT_CHOICE_PROMPT,
+     )
+ )
+ class MMBenchAdapter(VisionLanguageAdapter, MultiChoiceAdapter):
+ 
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+ 
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         answers_list: List[str] = [record.get('A', ''), record.get('B', ''), record.get('C', ''), record.get('D', '')]
+         answers_list = [ans for ans in answers_list if (ans.strip() and ans != 'nan')]
+         question_hint = record['hint'] + record['question']
+         input_text = prompt(question=question_hint, choices=answers_list, template=self.prompt_template)
+         content_list: List[Content] = [ContentText(text=input_text)]
+         image = record.get('image')
+         if image:
+             image_base64 = bytes_to_base64(image['bytes'], format='jpeg', add_header=True)
+             content_list.append(ContentImage(image=image_base64))
+         label_answer = record.get('answer')
+         return Sample(
+             input=[ChatMessageUser(content=content_list)],
+             choices=answers_list,
+             target=label_answer,
+             metadata={
+                 'index': record.get('index'),
+                 'category': record.get('category'),
+                 'source': record.get('source'),
+                 'L2-category': record.get('L2-category'),
+                 'comment': record.get('comment'),
+                 'split': record.get('split')
+             }
+         )
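Two details of MMBenchAdapter.record_to_sample are easy to miss: options whose cell is the literal string 'nan' are dropped, and the hint is prepended to the question with no separator. A small stand-in (toy record, not library code) that mirrors just that logic:

# Illustrative stand-in for the option filtering and hint handling shown above.
record = {
    'hint': 'The image shows a road sign. ',
    'question': 'What should the driver do?',
    'A': 'Stop', 'B': 'Speed up', 'C': 'Turn around', 'D': 'nan',  # missing option dumped as 'nan'
    'answer': 'A',
}
options = [record.get(k, '') for k in 'ABCD']
options = [opt for opt in options if opt.strip() and opt != 'nan']  # same filter as the adapter
question = record['hint'] + record['question']                      # hint prepended verbatim
print(options)   # ['Stop', 'Speed up', 'Turn around']
print(question)  # 'The image shows a road sign. What should the driver do?'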
@@ -0,0 +1,73 @@
+ import re
+ from typing import Any, Dict, List
+ 
+ from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter, VisionLanguageAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.evaluator import TaskState
+ from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+ from evalscope.utils.io_utils import bytes_to_base64
+ from evalscope.utils.logger import get_logger
+ 
+ logger = get_logger()
+ 
+ MULT_CHOICE_PROMPT = r"""
+ Answer the following multiple choice question.
+ The last line of your response should be of the following format:
+ 'ANSWER: $LETTER' (without quotes)
+ where LETTER is one of A,B,C,D. Think step by step before answering.
+ 
+ {question}
+ """.strip()
+ 
+ SUBSET_LIST = [
+     'coarse perception', 'fine-grained perception', 'instance reasoning', 'logical reasoning', 'math',
+     'science & technology'
+ ]
+ 
+ 
+ @register_benchmark(
+     BenchmarkMeta(
+         name='mm_star',
+         pretty_name='MMStar',
+         tags=[Tags.MULTI_MODAL, Tags.KNOWLEDGE, Tags.MULTIPLE_CHOICE],
+         description=
+         'MMStar: an elite vision-indispensible multi-modal benchmark, aiming to ensure each curated sample exhibits visual dependency, minimal data leakage, and requires advanced multi-modal capabilities.',  # noqa: E501
+         dataset_id='evalscope/MMStar',
+         subset_list=SUBSET_LIST,
+         metric_list=['acc'],
+         default_subset='val',
+         eval_split='val',
+         prompt_template=MULT_CHOICE_PROMPT,
+     )
+ )
+ class MMStarAdapter(VisionLanguageAdapter, MultiChoiceAdapter):
+ 
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+ 
+         self.reformat_subset = True
+ 
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         input_text = MULT_CHOICE_PROMPT.format(question=record['question'])
+         content_list: List[Content] = [ContentText(text=input_text)]
+         image = record.get('image')
+         if image:
+             image_base64 = bytes_to_base64(image['bytes'], format='jpeg', add_header=True)
+             content_list.append(ContentImage(image=image_base64))
+         label_answer = record.get('answer')
+         return Sample(
+             input=[ChatMessageUser(content=content_list)],
+             choices=['A', 'B', 'C', 'D'],
+             target=label_answer,
+             subset_key=record.get('category'),
+             metadata={
+                 'index': record.get('index'),
+                 'category': record.get('category'),
+                 'l2_category': record.get('l2_category'),
+                 'source': record.get('meta_info', {}).get('source'),
+                 'split': record.get('meta_info', {}).get('split'),
+                 'image_path': record.get('meta_info', {}).get('image_path')
+             }
+         )
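Because the adapter sets self.reformat_subset = True and tags every Sample with subset_key=record.get('category'), the single 'val' split is presumably regrouped into the category-based subsets in SUBSET_LIST for reporting. A toy illustration of that regrouping (fabricated records, not real MMStar data):

# Illustrative stand-in for grouping samples by their 'category' subset key.
from collections import defaultdict

records = [
    {'index': 0, 'category': 'math', 'answer': 'B'},
    {'index': 1, 'category': 'coarse perception', 'answer': 'A'},
    {'index': 2, 'category': 'math', 'answer': 'D'},
]
by_subset = defaultdict(list)
for rec in records:
    by_subset[rec['category']].append(rec)  # mirrors subset_key=record.get('category')
print({name: len(items) for name, items in by_subset.items()})
# {'math': 2, 'coarse perception': 1}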
@@ -122,7 +122,7 @@ class MMMUAdapter(VisionLanguageAdapter):
              match = re.search(pattern, prediction)
              if match:
                  return match.group(1).strip()
-             return ''
+             return prediction.strip()
          else:
              raise ValueError(f'Unsupported question type: {question_type}')
  
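The one-line change above alters the fallback when the answer-extraction regex fails on an open-ended question: instead of scoring an empty string, the raw prediction is stripped and passed through. A self-contained illustration; the regex below is a placeholder, since the real pattern is defined earlier in MMMUAdapter.extract_answer and is not part of this hunk:

# Illustrative stand-in for the new fallback behaviour.
import re

pattern = r'ANSWER:\s*(.+)'               # placeholder, not the adapter's actual pattern
prediction = ' The distance is 42 km. '   # model output with no 'ANSWER:' line
match = re.search(pattern, prediction)
old_result = match.group(1).strip() if match else ''                  # pre-1.1.0 behaviour
new_result = match.group(1).strip() if match else prediction.strip()  # 1.1.0 behaviour
print(repr(old_result))  # ''
print(repr(new_result))  # 'The distance is 42 km.'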
@@ -1,15 +1,14 @@
  import ast
  from typing import Any, Dict, List
  
- from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+ from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter, VisionLanguageAdapter
  from evalscope.api.dataset import Sample
- from evalscope.api.evaluator import TaskState
  from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
  from evalscope.api.registry import register_benchmark
  from evalscope.constants import Tags
  from evalscope.utils.io_utils import bytes_to_base64
  from evalscope.utils.logger import get_logger
- from evalscope.utils.multi_choices import MultipleChoiceTemplate, answer_character, parse_answers, prompt
+ from evalscope.utils.multi_choices import MultipleChoiceTemplate, answer_character, prompt
  
  logger = get_logger()
  
@@ -60,7 +59,7 @@ DATASET_FORMATS = ['standard (4 options)', 'standard (10 options)', 'vision']
      BenchmarkMeta(
          name='mmmu_pro',
          pretty_name='MMMU-PRO',
-         tags=[Tags.MULTI_MODAL, Tags.KNOWLEDGE, Tags.QA],
+         tags=[Tags.MULTI_MODAL, Tags.KNOWLEDGE, Tags.MULTIPLE_CHOICE],
          description=
          'MMMU-Pro is an enhanced multimodal benchmark designed to rigorously assess the true understanding capabilities of advanced AI models across multiple modalities. It builds upon the original MMMU benchmark by introducing several key improvements that make it more challenging and realistic, ensuring that models are evaluated on their genuine ability to integrate and comprehend both visual and textual information.',  # noqa: E501
          dataset_id='AI-ModelScope/MMMU_Pro',
@@ -73,7 +72,7 @@ DATASET_FORMATS = ['standard (4 options)', 'standard (10 options)', 'vision']
          }
      )
  )
- class MMMUPROAdapter(VisionLanguageAdapter):
+ class MMMUPROAdapter(VisionLanguageAdapter, MultiChoiceAdapter):
      MAX_IMAGES: int = 7
  
      def __init__(self, *args, **kwargs):
@@ -123,7 +122,3 @@ class MMMUPROAdapter(VisionLanguageAdapter):
              subset_key=record['subject'],
              metadata=metadata,
          )
- 
-     def extract_answer(self, prediction: str, task_state: TaskState) -> str:
-         answers = parse_answers(task_state)
-         return ''.join(sorted(list(answers)))
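Dropping the local extract_answer override (along with the now-unused TaskState and parse_answers imports) presumably defers answer extraction to MultiChoiceAdapter, which the class now inherits from. A minimal, self-contained sketch of why that works with the new base-class order; these stand-in classes are not the evalscope ones:

# Illustrative stand-in: method resolution with the new multiple inheritance.
class MultiChoiceAdapter:
    def extract_answer(self, prediction, task_state):
        return 'resolved on MultiChoiceAdapter'

class VisionLanguageAdapter:
    pass  # assumed not to define extract_answer itself

class MMMUPROAdapter(VisionLanguageAdapter, MultiChoiceAdapter):
    pass  # no local override any more

print([cls.__name__ for cls in MMMUPROAdapter.__mro__])
# ['MMMUPROAdapter', 'VisionLanguageAdapter', 'MultiChoiceAdapter', 'object']
print(MMMUPROAdapter().extract_answer('A', None))
# 'resolved on MultiChoiceAdapter'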