evalscope 1.1.0__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (100)
  1. evalscope/api/benchmark/__init__.py +8 -1
  2. evalscope/api/benchmark/adapters/__init__.py +1 -0
  3. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  4. evalscope/api/benchmark/benchmark.py +14 -0
  5. evalscope/api/dataset/dataset.py +21 -0
  6. evalscope/api/dataset/loader.py +6 -2
  7. evalscope/api/mixin/sandbox_mixin.py +32 -54
  8. evalscope/api/model/generate_config.py +6 -0
  9. evalscope/benchmarks/aa_lcr/__init__.py +0 -0
  10. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  11. evalscope/benchmarks/bfcl/bfcl_adapter.py +1 -1
  12. evalscope/benchmarks/data_collection/data_collection_adapter.py +2 -1
  13. evalscope/benchmarks/general_arena/general_arena_adapter.py +1 -1
  14. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
  15. evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
  16. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +23 -4
  17. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  18. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +158 -0
  19. evalscope/benchmarks/humaneval/humaneval_adapter.py +2 -1
  20. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +3 -1
  21. evalscope/benchmarks/math_verse/__init__.py +0 -0
  22. evalscope/benchmarks/math_verse/math_verse_adapter.py +100 -0
  23. evalscope/benchmarks/math_vision/__init__.py +0 -0
  24. evalscope/benchmarks/math_vision/math_vision_adapter.py +111 -0
  25. evalscope/benchmarks/math_vista/math_vista_adapter.py +6 -26
  26. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -1
  27. evalscope/benchmarks/ner/__init__.py +0 -0
  28. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  29. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  30. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  31. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  32. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  33. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  34. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  35. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  36. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  37. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  38. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  39. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  40. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  41. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  42. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  43. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  44. evalscope/benchmarks/ocr_bench_v2/utils.py +1 -0
  45. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  46. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  47. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  48. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  49. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  50. evalscope/benchmarks/poly_math/__init__.py +0 -0
  51. evalscope/benchmarks/poly_math/poly_math_adapter.py +127 -0
  52. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  53. evalscope/benchmarks/pope/__init__.py +0 -0
  54. evalscope/benchmarks/pope/pope_adapter.py +111 -0
  55. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  56. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  57. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  58. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  59. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +1 -1
  60. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +1 -1
  61. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  62. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  63. evalscope/benchmarks/zerobench/__init__.py +0 -0
  64. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  65. evalscope/constants.py +4 -0
  66. evalscope/evaluator/evaluator.py +72 -79
  67. evalscope/metrics/math_parser.py +14 -0
  68. evalscope/metrics/metric.py +1 -1
  69. evalscope/models/utils/openai.py +4 -0
  70. evalscope/perf/arguments.py +24 -4
  71. evalscope/perf/benchmark.py +74 -89
  72. evalscope/perf/http_client.py +31 -16
  73. evalscope/perf/main.py +15 -2
  74. evalscope/perf/plugin/api/base.py +9 -7
  75. evalscope/perf/plugin/api/custom_api.py +13 -58
  76. evalscope/perf/plugin/api/default_api.py +179 -79
  77. evalscope/perf/plugin/api/openai_api.py +4 -3
  78. evalscope/perf/plugin/datasets/base.py +21 -0
  79. evalscope/perf/plugin/datasets/custom.py +2 -3
  80. evalscope/perf/plugin/datasets/line_by_line.py +2 -3
  81. evalscope/perf/plugin/datasets/longalpaca.py +2 -3
  82. evalscope/perf/plugin/datasets/openqa.py +2 -4
  83. evalscope/perf/plugin/datasets/random_dataset.py +1 -3
  84. evalscope/perf/utils/benchmark_util.py +36 -22
  85. evalscope/perf/utils/db_util.py +14 -19
  86. evalscope/perf/utils/local_server.py +0 -44
  87. evalscope/perf/utils/log_utils.py +21 -6
  88. evalscope/report/__init__.py +2 -1
  89. evalscope/run.py +4 -0
  90. evalscope/utils/function_utils.py +195 -12
  91. evalscope/utils/io_utils.py +74 -0
  92. evalscope/utils/logger.py +49 -17
  93. evalscope/utils/ner.py +377 -0
  94. evalscope/version.py +2 -2
  95. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/METADATA +235 -363
  96. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/RECORD +100 -55
  97. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/WHEEL +1 -1
  98. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/entry_points.txt +0 -0
  99. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info/licenses}/LICENSE +0 -0
  100. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/top_level.txt +0 -0
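
Several of the new modules below register additional benchmarks (for example poly_math, pope, seed_bench_2_plus, and simple_vqa). As a rough, hypothetical illustration of how a newly registered benchmark is typically invoked (this sketch assumes evalscope's documented TaskConfig/run_task entry points and uses a placeholder model name; it is not part of this diff):

from evalscope import TaskConfig, run_task

# Minimal smoke-test configuration; 'qwen2.5-7b-instruct' is only a placeholder model id.
task_cfg = TaskConfig(
    model='qwen2.5-7b-instruct',
    datasets=['poly_math'],  # benchmark name registered in this release
    limit=5,  # evaluate only a few samples
)
run_task(task_cfg=task_cfg)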
File without changes
evalscope/benchmarks/poly_math/poly_math_adapter.py (new file)
@@ -0,0 +1,127 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+from typing import Any, Dict, List, Tuple
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.dataset.dataset import DatasetDict, MemoryDataset
+from evalscope.api.metric.scorer import AggScore, SampleScore, Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.report.report import Report, Subset
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+SUBSET_LIST = [
+    'en', 'zh', 'ar', 'bn', 'de', 'es', 'fr', 'id', 'it', 'ja', 'ko', 'ms', 'pt', 'ru', 'sw', 'te', 'th', 'vi'
+]
+LEVEL_LIST = ['low', 'medium', 'high', 'top']
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='poly_math',
+        pretty_name='PolyMath',
+        tags=[Tags.MATH, Tags.REASONING, Tags.MULTI_LINGUAL],
+        description=
+        'PolyMath is a multilingual mathematical reasoning benchmark covering 18 languages and 4 easy-to-hard difficulty levels, with 9,000 high-quality problem samples. Our benchmark ensures difficulty comprehensiveness, language diversity, and high-quality translation, making it a highly discriminative multilingual mathematical benchmark in the era of reasoning LLMs.',  # noqa: E501
+        dataset_id='evalscope/PolyMath',
+        subset_list=SUBSET_LIST,
+        metric_list=[{
+            'acc': {
+                'numeric': True
+            }
+        }],
+        eval_split='test',
+        prompt_template='{question}',
+    )
+)
+class PolyMathAdapter(DefaultDataAdapter):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def load(self) -> Tuple[DatasetDict, None]:
+        """Load all difficulty levels, rename subsets with their level suffix, and merge them."""
+        # Need to load all levels to get the full dataset
+        dataset_list: List[Dict[str, MemoryDataset]] = []
+        original_split = getattr(self, 'eval_split', None)
+        try:
+            for level in LEVEL_LIST:
+                self.eval_split = level
+                cur_level_dataset_dict, _ = super().load()
+                # Build a renamed mapping without mutating during iteration
+                renamed: Dict[str, MemoryDataset] = {
+                    f'{subset}-{level}': ds
+                    for subset, ds in cur_level_dataset_dict.items()
+                }
+                dataset_list.append(renamed)
+        finally:
+            # Restore original split to avoid side effects
+            if original_split is not None:
+                self.eval_split = original_split
+        # Merge all levels into one dataset
+        return DatasetDict.from_dataset_dicts(dataset_list), None
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        """Convert a single record into a Sample with language-specific instruction."""
+        from .utils.instruction import query_dic
+
+        # e.g., 'high-en-1'
+        question_id = record['id']
+        level, language, index = question_id.split('-')
+        # Get the instruction for the specific language
+        instruction = query_dic[language]
+
+        return Sample(
+            input=record['question'] + '\n' + instruction,
+            target=record['answer'],
+            metadata={
+                'level': level,
+                'language': language,
+                'index': index,
+            },
+        )
+
+    def _on_generate_report_end(self, report: Report, output_dir, **kwargs):
+        """
+        Finalize the report generation process. Calculate the difficulty-weighted accuracy (DW-ACC)
+        per language and the overall DW-ACC, and append as a new category to each metric.
+        """
+        from evalscope.report import Category, Metric
+
+        WEIGHT_DENOMINATOR = 15  # 1 + 2 + 4 + 8 for ['low','medium','high','top']
+
+        for metric in report.metrics:
+            # Collect all subsets by name for easy lookup (e.g., "en-low")
+            subset_dict: Dict[str, Subset] = {}
+            for category in metric.categories:
+                for subset in category.subsets:
+                    subset_dict[subset.name] = subset
+
+            # Compute per-language DW-ACC
+            dw_subsets: List[Subset] = []
+            for language in SUBSET_LIST:
+                weighted_sum = 0.0
+                total_num = 0
+                for i, level in enumerate(LEVEL_LIST):
+                    s = subset_dict.get(f'{language}-{level}')
+                    if s is not None:
+                        weighted_sum += (2**i) * s.score
+                        total_num += s.num
+                    # missing subsets contribute 0 by design
+                if total_num == 0:
+                    continue
+                dw_acc = weighted_sum / WEIGHT_DENOMINATOR
+                dw_subsets.append(Subset(name=language, score=dw_acc, num=total_num))
+
+            # Overall DW-ACC: unweighted average across languages
+            if dw_subsets:
+                overall_score = sum(s.score for s in dw_subsets) / len(dw_subsets)
+                overall_num = sum(s.num for s in dw_subsets)
+                dw_subsets.append(Subset(name='OVERALL', score=overall_score, num=overall_num))

+            # Append DW-ACC metric
+            if dw_subsets:
+                report.metrics.append(Metric(name='DW-ACC', categories=[Category(name='-', subsets=dw_subsets)]))
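
The DW-ACC appended above reduces to a fixed 1/2/4/8 weighting of the four difficulty levels divided by 15. A standalone sketch of that arithmetic, using made-up per-level accuracies rather than real results:

LEVEL_LIST = ['low', 'medium', 'high', 'top']
WEIGHT_DENOMINATOR = 15  # 1 + 2 + 4 + 8

per_level_acc = {'low': 0.9, 'medium': 0.8, 'high': 0.6, 'top': 0.3}  # hypothetical values
weighted_sum = sum((2**i) * per_level_acc[level] for i, level in enumerate(LEVEL_LIST))
dw_acc = weighted_sum / WEIGHT_DENOMINATOR  # (0.9 + 1.6 + 2.4 + 2.4) / 15 = 0.4867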
evalscope/benchmarks/poly_math/utils/instruction.py (new file)
@@ -0,0 +1,105 @@
+# flake8: noqa
+language_dic = {
+    'en': 'English',
+    'zh': 'Chinese',
+    'ar': 'Arabic',
+    'bn': 'Bangali',
+    'de': 'German',
+    'es': 'Spanish',
+    'fr': 'French',
+    'id': 'Indonesian',
+    'it': 'Italian',
+    'ja': 'Japanese',
+    'ko': 'Korean',
+    'ms': 'Malay',
+    'pt': 'Potuguese',
+    'ru': 'Russian',
+    'sw': 'Swahili',
+    'te': 'Telugu',
+    'th': 'Thai',
+    'vi': 'Vietnamese',
+}
+
+query_dic = {
+    'en': 'Note: Please put the final answer in the $\\boxed\{\}$.',
+    'zh': '注意:请将最终答案放在 $\\boxed\{\}$ 中。',
+    'ar': 'ملاحظة: يُرجى وضع الإجابة النهائية في $\\boxed\{\}$.',
+    'bn': 'বিঃদ্রঃ: অনুগ্রহ করে চূড়ান্ত উত্তরটি $\\boxed\{\}$ এর মধ্যে রাখুন।',
+    'de': 'Hinweis: Bitte setzen Sie die endgültige Antwort in $\\boxed\{\}$.',
+    'es': 'Nota: Por favor, coloque la respuesta final en el $\\boxed\{\}$.',
+    'fr': 'Remarque : Veuillez mettre la réponse finale dans le $\\boxed\{\}$.',
+    'id': 'Catatan: Silakan letakkan jawaban akhir di dalam $\\boxed\{\}$.',
+    'it': 'Nota: Per favore, metti la risposta finale nel $\\boxed\{\}$.',
+    'ja': '注意:最終的な答えを $\\boxed\{\}$ に入れてください。',
+    'ko': '참고: 최종 답안을 $\\boxed\{\}$ 안에 넣어 주세요.',
+    'ms': 'Nota: Sila letakkan jawapan akhir dalam $\\boxed\{\}$.',
+    'pt': 'Nota: Por favor, coloque a resposta final no $\\boxed\{\}$.',
+    'ru': 'Примечание: Пожалуйста, поместите окончательный ответ в $\\boxed\{\}$.',
+    'sw': 'Kumbuka: Tafadhali weka jibu la mwisho katika $\\boxed\{\}$.',
+    'te': 'గమనిక: దయచేసి తుది జవాబును $\\boxed\{\}$ లో ఉంచండి.',
+    'th': 'หมายเหตุ: กรุณาใส่คำตอบสุดท้ายใน $\\boxed\{\}$.',
+    'vi': 'Lưu ý: Vui lòng đặt câu trả lời cuối cùng trong $\\boxed\{\}$.',
+}
+
+language_control = {
+    'forcing_raw': {
+        'en': 'Use English to think and answer.',
+        'zh': '使用中文进行思考和回答。',
+        'ar': 'استخدم العربية للتفكير والإجابة.',
+        'bn': 'বাংলা ব্যবহার করে চিন্তা এবং উত্তর দিন।',
+        'de': 'Verwende Deutsch, um zu denken und zu antworten.',
+        'es': 'Usa español para pensar y responder.',
+        'fr': 'Utilisez le français pour penser et répondre.',
+        'id': 'Gunakan bahasa Indonesia untuk berpikir dan menjawab.',
+        'it': 'Usa italiano per pensare e rispondere.',
+        'ja': '日本語を使って考え、回答してください。',
+        'ko': '한국어로 생각하고 답변하세요.',
+        'ms': 'Gunakan bahasa Melayu untuk berfikir dan menjawab.',
+        'pt': 'Use português para pensar e responder.',
+        'ru': 'Используйте русский язык для размышлений и ответов.',
+        'sw': 'Tumia Kiswahili kufikiri na kujibu.',
+        'te': 'తెలుగును ఉపయోగించి ఆలోచించి సమాధానం ఇవ్వండి.',
+        'th': 'ใช้ภาษาไทยในการคิดและตอบคำถาม.',
+        'vi': 'Sử dụng tiếng Việt để suy nghĩ và trả lời.',
+    },
+    'forcing_en': {
+        'en': 'Use English to think and answer.',
+        'zh': '使用英文进行思考和回答。',
+        'ar': 'استخدم اللغة الإنجليزية للتفكير والإجابة.',
+        'bn': 'ইংরেজি ব্যবহার করে চিন্তা এবং উত্তর দিন।',
+        'de': 'Verwenden Sie Englisch, um zu denken und zu antworten.',
+        'es': 'Usa inglés para pensar y responder.',
+        'fr': "Utilisez l'anglais pour penser et répondre.",
+        'id': 'Gunakan bahasa Inggris untuk berpikir dan menjawab.',
+        'it': 'Usa inglese per pensare e rispondere.',
+        'ja': '英語を使って考え、回答してください。',
+        'ko': '영어로 생각하고 답변하세요.',
+        'ms': 'Gunakan bahasa Inggeris untuk berfikir dan menjawab.',
+        'pt': 'Use inglês para pensar e responder.',
+        'ru': 'Используйте английский язык, чтобы думать и отвечать.',
+        'sw': 'Tumia Kiingereza kufikiri na kujibu.',
+        'te': 'ఇంగ్లీష్‌ను ఉపయోగించి ఆలోచించి ఉత్తరించండి.',
+        'th': 'ใช้ภาษาอังกฤษในการคิดและตอบคำถาม.',
+        'vi': 'Sử dụng tiếng Anh để suy nghĩ và trả lời.',
+    },
+    'forcing_prefer': {
+        'en': 'Choose the language you are most proficient in to think and answer.',
+        'zh': '自选一种你最擅长的语言进行思考和回答。',
+        'ar': 'اختر اللغة التي تجيدها أكثر للتفكير والإجابة.',
+        'bn': 'আপনি যে ভাষাটি সবচেয়ে পারদর্শী সেটি বেছে নিয়ে চিন্তা এবং উত্তর দিন।',
+        'de': 'Wählen Sie die Sprache, in der Sie am kompetentesten sind, um zu denken und zu antworten.',
+        'es': 'Elige el idioma en el que eres más competente para pensar y responder.',
+        'fr': 'Choisissez la langue dans laquelle vous êtes le plus compétent pour penser et répondre.',
+        'id': 'Pilih bahasa yang paling Anda kuasai untuk berpikir dan menjawab.',
+        'it': 'Scegli la lingua in cui sei più competente per pensare e rispondere.',
+        'ja': '最も得意な言語を選んで考え、回答してください。',
+        'ko': '가장 능숙한 언어를 선택하여 생각하고 답변하세요.',
+        'ms': 'Pilih bahasa yang paling anda mahir untuk berfikir dan menjawab.',
+        'pt': 'Escolha o idioma em que você é mais competente para pensar e responder.',
+        'ru': 'Выберите язык, в котором вы наиболее компетентны, чтобы думать и отвечать.',
+        'sw': 'Chagua lugha ambayo unamudu zaidi kufikiri na kujibu.',
+        'te': 'మీరు అత్యంత స్థిరంగా ఉన్న భాషను స్వీకరించి ఆలోచించిตอบ。',
+        'th': 'เลือกภาษาที่คุณมีความสามารถมากที่สุดในการคิดและตอบคำถาม.',
+        'vi': 'Chọn ngôn ngữ mà bạn thành thạo nhất để suy nghĩ và trả lời.',
+    }
+}
File without changes
evalscope/benchmarks/pope/pope_adapter.py (new file)
@@ -0,0 +1,111 @@
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator.state import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.metric.scorer import AggScore, SampleScore, Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='pope',
+        pretty_name='POPE',
+        tags=[Tags.MULTI_MODAL, Tags.HALLUCINATION, Tags.YES_NO],
+        description=
+        'POPE (Polling-based Object Probing Evaluation) is a benchmark designed to evaluate object hallucination in large vision-language models (LVLMs). It tests models by having them answer simple yes/no questions about the presence of specific objects in an image. This method helps measure how accurately a model\'s responses align with the visual content, with a focus on identifying instances where models claim objects exist that are not actually present. The benchmark employs various sampling strategies, including random, popular, and adversarial sampling, to create a robust set of questions for assessment.',  # noqa: E501
+        dataset_id='lmms-lab/POPE',
+        metric_list=['accuracy', 'precision', 'recall', 'f1_score', 'yes_ratio'],
+        subset_list=['popular', 'adversarial', 'random'],
+        default_subset='Full',
+        prompt_template='{question}\nPlease answer YES or NO without an explanation.',
+    )
+)
+class PopeAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.split_as_subset = True
+        self.add_overall_metric = False
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+
+        input_text = self.prompt_template.format(question=record['question'])
+        content_list: List[Content] = [ContentText(text=input_text)]
+        image = record.get('image')
+        if image:
+            image_base64 = bytes_to_base64(image['bytes'], format='png', add_header=True)
+            content_list.append(ContentImage(image=image_base64))
+        answer = record['answer'].upper()  # 'YES' or 'NO'
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            target=answer,
+            metadata={
+                'id': record.get('id'),
+                'answer': answer,
+                'category': record.get('category'),
+                'question_id': record.get('question_id'),
+            }
+        )
+
+    def match_score(self, original_prediction, filtered_prediction, reference, task_state) -> Score:
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+        # Check if the reference answer is in the filtered prediction
+        result = 1 if reference in filtered_prediction.strip().upper() else 0
+        score.value = {'acc': result}
+        return score
+
+    def aggregate_scores(self, sample_scores: List[SampleScore]) -> List[AggScore]:
+        """
+        Custom aggregation to compute accuracy, precision, recall, f1_score, and yes_ratio.
+        """
+
+        def compute_metrics(scores: List[SampleScore]):
+            tp = fp = tn = fn = 0
+            yes_count = 0
+            total_count = len(scores)
+
+            for ss in scores:
+                gt = ss.sample_metadata['answer'].strip().upper()
+                # Get prediction based on score
+                pred = gt if ss.score.main_value == 1 else ('NO' if gt == 'YES' else 'YES')
+                if pred == 'YES':
+                    yes_count += 1
+                if pred == 'YES' and gt == 'YES':
+                    tp += 1
+                elif pred == 'YES' and gt == 'NO':
+                    fp += 1
+                elif pred == 'NO' and gt == 'NO':
+                    tn += 1
+                elif pred == 'NO' and gt == 'YES':
+                    fn += 1
+
+            accuracy = (tp + tn) / total_count if total_count > 0 else 0.0
+            precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
+            recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
+            f1_score = (2 * precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
+            yes_ratio = yes_count / total_count if total_count > 0 else 0.0
+
+            return {
+                'accuracy': accuracy,
+                'precision': precision,
+                'recall': recall,
+                'f1_score': f1_score,
+                'yes_ratio': yes_ratio
+            }
+
+        overall_metrics = compute_metrics(sample_scores)
+        agg_scores = []
+        for metric_name, value in overall_metrics.items():
+            agg_scores.append(AggScore(metric_name=metric_name, score=value, num=len(sample_scores), metadata={}))
+
+        return agg_scores
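
The aggregation above recovers a yes/no confusion matrix from the per-sample 'acc' values and the stored ground truth. A quick sanity check of that arithmetic with invented counts (not taken from any evaluation):

tp, fp, tn, fn = 40, 10, 45, 5  # hypothetical confusion-matrix counts
total = tp + fp + tn + fn       # 100 samples
yes_count = tp + fp             # predictions that answered YES

accuracy = (tp + tn) / total                               # 0.85
precision = tp / (tp + fp)                                 # 0.80
recall = tp / (tp + fn)                                    # 0.889
f1_score = 2 * precision * recall / (precision + recall)   # 0.842
yes_ratio = yes_count / total                              # 0.50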
File without changes
evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py (new file)
@@ -0,0 +1,72 @@
+# flake8: noqa: E501
+import re
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+from evalscope.utils.multi_choices import MultipleChoiceTemplate, parse_answers, prompt
+
+logger = get_logger()
+
+MULT_CHOICE_PROMPT = MultipleChoiceTemplate.SINGLE_ANSWER_COT
+
+SUBSET_LIST = ['chart', 'web', 'map']
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='seed_bench_2_plus',
+        pretty_name='SEED-Bench-2-Plus',
+        dataset_id='evalscope/SEED-Bench-2-Plus',
+        tags=[Tags.KNOWLEDGE, Tags.REASONING, Tags.MULTIPLE_CHOICE, Tags.MULTI_MODAL],
+        description=
+        'SEED-Bench-2-Plus is a large-scale benchmark to evaluate Multimodal Large Language Models (MLLMs). It consists of 2.3K multiple-choice questions with precise human annotations, spanning three broad categories: Charts, Maps, and Webs, each of which covers a wide spectrum of text-rich scenarios in the real world.',
+        subset_list=SUBSET_LIST,
+        metric_list=['acc'],
+        eval_split='test',
+        prompt_template=MULT_CHOICE_PROMPT,
+    )
+)
+class SeedBench2PlusAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.reformat_subset = True
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        question = record['question']
+        answers_list = [record['choice_A'], record['choice_B'], record['choice_C'], record['choice_D']]
+        input_text = prompt(question=question, choices=answers_list, template=self.prompt_template)
+        content_list: List[Content] = [ContentText(text=input_text)]
+        image = record['image']
+        if image:
+            image_base64 = bytes_to_base64(image['bytes'], format='png', add_header=True)
+            content_list.append(ContentImage(image=image_base64))
+        metadata = {
+            'data_id': record['data_id'],
+            'question_id': record['question_id'],
+            'question_image_subtype': record['question_image_subtype'],
+            'data_source': record['data_source'],
+            'data_type': record['data_type'],
+            'level': record['level'],
+            'subpart': record['subpart'],
+            'version': record['version'],
+        }
+        label_answer = record['answer']
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            choices=answers_list,
+            target=label_answer,
+            subset_key=record['question_image_type'],
+            metadata=metadata,
+        )
+
+    def extract_answer(self, prediction: str, task_state: TaskState) -> str:
+        answers = parse_answers(task_state)
+        return ''.join(sorted(list(answers)))
File without changes
evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py (new file)
@@ -0,0 +1,169 @@
+# flake8: noqa: E501
+import re
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.metric.scorer import Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+GRADER_TEMPLATE = """
+Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"].
+First, I will give examples of each grade, and then you will grade a new example.
+
+
+The following are examples of CORRECT predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia Obama and Sasha Obama
+Predicted answer 1: sasha and malia obama
+Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check
+Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001.
+```
+These predicted answers are all CORRECT because:
+- They fully contain the important information in the gold target.
+- They do not contain any information that contradicts the gold target.
+- Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
+- Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions.
+
+
+The following are examples of INCORRECT predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia and Sasha
+Predicted answer 1: Malia.
+Predicted answer 2: Malia, Sasha, and Susan.
+Predicted answer 3: Barack Obama does not have any children.
+Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia.
+Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children.
+Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer?
+Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information.
+```
+These predicted answers are all INCORRECT because:
+- A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect.
+
+
+The following are examples of NOT_ATTEMPTED predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia and Sasha
+Predicted answer 1: I don't know.
+Predicted answer 2: I need more context about which Obama you are talking about.
+Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children.
+Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one.
+```
+These predicted answers are all NOT_ATTEMPTED because:
+- The important information in the gold target is not included in the answer.
+- No statements in the answer contradict the gold target.
+
+
+Also note the following things:
+- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k".
+- Predicted answers "120k", "124k", and 115k" are all CORRECT.
+- Predicted answers "100k" and "113k" are INCORRECT.
+- Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target.
+- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question.
+- For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer.
+- Do not punish predicted answers if they omit information that would be clearly inferred from the question.
+- For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California".
+- Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question.
+- For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question.
+- For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed.
+- Do not punish for typos in people's name if it's clearly the same name.
+- For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung".
+
+
+Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT_ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+```
+Question: {question}
+Gold target: {target}
+Predicted answer: {predicted_answer}
+```
+
+Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+C: NOT_ATTEMPTED
+
+Just return the letters "A", "B", or "C", with no text around it.
+""".strip()  # noqa: E501
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='simple_vqa',
+        pretty_name='SimpleVQA',
+        dataset_id='m-a-p/SimpleVQA',
+        tags=[Tags.REASONING, Tags.MULTI_MODAL, Tags.QA],
+        description=
+        'SimpleVQA, the first comprehensive multi-modal benchmark to evaluate the factuality ability of MLLMs to answer natural language short questions. SimpleVQA is characterized by six key features: it covers multiple tasks and multiple scenarios, ensures high quality and challenging queries, maintains static and timeless reference answers, and is straightforward to evaluate.',
+        metric_list=['acc'],
+        eval_split='test',
+        prompt_template='Answer the question:\n\n{question}',
+    )
+)
+class SimpleVQAAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._use_llm_judge = True  # Use LLM as a judge by default
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        content_list: list[Content] = [ContentText(text=self.prompt_template.format(question=record['question']))]
+        image_base64 = record['image']
+        content_list.append(ContentImage(image=f'data:image/jpeg;base64,{image_base64}'))
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            target=record['answer'],
+            metadata={
+                'data_id': record['data_id'],
+                'image_description': record['image_description'],
+                'language': record['language'],
+                'original_category': record['original_category'],
+                'source': record['source'],
+                'atomic_question': record['atomic_question'],
+                'atomic_fact': record['atomic_fact'],
+            }
+        )
+
+    def llm_match_score(
+        self,
+        original_prediction: str,
+        filtered_prediction: str,
+        reference: str,
+        task_state: TaskState,
+    ) -> Score:
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+
+        question = task_state.input_text
+
+        # Request judge and obtain score
+        prompt = GRADER_TEMPLATE.format(question=question, target=reference, predicted_answer=filtered_prediction)
+        judge_response = self.llm_judge.judge(prompt)
+        # parse grading response
+        match = re.search(r'(A|B|C)', judge_response)
+        res = match.group(0) if match else 'C'
+
+        # Set score based on the match result
+        score.value = {
+            'is_correct': 1 if res == 'A' else 0,
+            'is_incorrect': 1 if res == 'B' else 0,
+            'is_not_attempted': 1 if res == 'C' else 0,
+        }
+        score.explanation = f'LLM judge: {judge_response}'
+        score.metadata = {
+            'source': 'llm_judge',
+            'judge_strategy': self.judge_strategy,
+            'model': self.llm_judge.model_id
+        }
+        score.main_score_name = 'is_correct'
+        return score
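
The judge response above is reduced to the first capital A, B, or C it contains, defaulting to NOT_ATTEMPTED when none is found. A small illustration with toy judge outputs (not real model responses):

import re

for judge_response in ['A', 'B: INCORRECT', 'no letter given here']:
    match = re.search(r'(A|B|C)', judge_response)
    print(match.group(0) if match else 'C')  # prints A, then B, then C (the default)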
evalscope/benchmarks/tau_bench/tau_bench_adapter.py
@@ -26,7 +26,7 @@ logger = get_logger()
         description='A benchmark emulating dynamic conversations between a user (simulated by language models) '
         'and a language agent provided with domain-specific API tools and policy guidelines. '
         'Please install it with `pip install git+https://github.com/sierra-research/tau-bench` '
-        'before evaluating and set a user model. [Usage Example](https://evalscope.readthedocs.io/zh-cn/latest/third_party/tau_bench.html)',  # noqa: E501
+        'before evaluating and set a user model. [Usage Example](https://evalscope.readthedocs.io/en/latest/third_party/tau_bench.html)',  # noqa: E501
         dataset_id='https://github.com/sierra-research/tau-bench',
         subset_list=['airline', 'retail'],
         metric_list=['Pass^1'],
evalscope/benchmarks/tool_bench/tool_bench_adapter.py
@@ -21,7 +21,7 @@ logger = get_logger()
         description='ToolBench is a benchmark for evaluating AI models on tool use tasks. '
         'It includes various subsets such as in-domain and out-of-domain, '
         'each with its own set of problems that require step-by-step reasoning to arrive at the correct answer. '
-        '[Usage Example](https://evalscope.readthedocs.io/zh-cn/latest/third_party/toolbench.html)',
+        '[Usage Example](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html)',
         dataset_id='AI-ModelScope/ToolBench-Static',
         subset_list=['in_domain', 'out_of_domain'],
         metric_list=['Act.EM', 'Plan.EM', 'F1', 'HalluRate', 'Rouge-L'],
File without changes