evalscope 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of evalscope might be problematic.
- evalscope/api/benchmark/__init__.py +1 -1
- evalscope/api/benchmark/adapters/__init__.py +2 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +7 -4
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
- evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
- evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- evalscope/api/benchmark/benchmark.py +62 -2
- evalscope/api/benchmark/meta.py +9 -0
- evalscope/api/dataset/dataset.py +6 -6
- evalscope/api/dataset/loader.py +2 -1
- evalscope/api/evaluator/cache.py +24 -1
- evalscope/api/evaluator/evaluator.py +5 -0
- evalscope/api/evaluator/state.py +17 -1
- evalscope/api/messages/__init__.py +1 -0
- evalscope/api/messages/chat_message.py +52 -2
- evalscope/api/metric/scorer.py +15 -7
- evalscope/api/mixin/__init__.py +1 -1
- evalscope/api/mixin/llm_judge_mixin.py +2 -0
- evalscope/api/mixin/sandbox_mixin.py +204 -0
- evalscope/api/model/generate_config.py +1 -6
- evalscope/api/model/model.py +5 -2
- evalscope/api/tool/tool_info.py +1 -1
- evalscope/app/app.py +3 -0
- evalscope/app/ui/single_model.py +3 -3
- evalscope/app/utils/data_utils.py +7 -7
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/text_utils.py +14 -12
- evalscope/arguments.py +8 -4
- evalscope/backend/opencompass/backend_manager.py +0 -2
- evalscope/backend/rag_eval/utils/embedding.py +9 -1
- evalscope/benchmarks/ai2d/ai2d_adapter.py +53 -0
- evalscope/benchmarks/amc/amc_adapter.py +46 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
- evalscope/benchmarks/bfcl/bfcl_adapter.py +142 -7
- evalscope/benchmarks/bfcl/generation.py +9 -9
- evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
- evalscope/benchmarks/data_collection/data_collection_adapter.py +23 -19
- evalscope/benchmarks/drop/drop_adapter.py +1 -1
- evalscope/benchmarks/frames/frames_adapter.py +2 -1
- evalscope/benchmarks/general_arena/general_arena_adapter.py +5 -1
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/image_edit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +6 -5
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/tau_bench/generation.py +1 -1
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +20 -19
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
- evalscope/cli/start_app.py +7 -1
- evalscope/cli/start_perf.py +7 -1
- evalscope/config.py +96 -14
- evalscope/constants.py +11 -0
- evalscope/evaluator/evaluator.py +30 -10
- evalscope/metrics/llm_judge.py +19 -7
- evalscope/metrics/metric.py +27 -2
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/model_apis.py +22 -0
- evalscope/models/openai_compatible.py +3 -0
- evalscope/models/text2image_model.py +2 -2
- evalscope/models/utils/openai.py +8 -6
- evalscope/perf/arguments.py +2 -0
- evalscope/perf/benchmark.py +2 -0
- evalscope/perf/plugin/api/base.py +2 -2
- evalscope/perf/plugin/api/default_api.py +7 -7
- evalscope/perf/plugin/api/openai_api.py +83 -19
- evalscope/perf/plugin/datasets/flickr8k.py +2 -2
- evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
- evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
- evalscope/perf/utils/benchmark_util.py +7 -5
- evalscope/perf/utils/local_server.py +3 -0
- evalscope/report/__init__.py +0 -1
- evalscope/report/combinator.py +0 -25
- evalscope/report/generator.py +8 -87
- evalscope/report/report.py +8 -4
- evalscope/run.py +9 -5
- evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- evalscope/utils/chat_service.py +1 -1
- evalscope/utils/function_utils.py +41 -0
- evalscope/utils/import_utils.py +73 -1
- evalscope/utils/io_utils.py +56 -7
- evalscope/utils/json_schema.py +23 -2
- evalscope/utils/logger.py +19 -0
- evalscope/utils/model_utils.py +4 -3
- evalscope/utils/multi_choices.py +23 -6
- evalscope/version.py +2 -2
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/METADATA +17 -24
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/RECORD +145 -103
- tests/benchmark/test_eval.py +80 -37
- tests/benchmark/test_image_edit.py +65 -0
- tests/benchmark/test_sandbox.py +81 -0
- tests/benchmark/test_vlm.py +137 -0
- tests/cli/test_all.py +83 -43
- tests/cli/test_collection.py +8 -5
- tests/cli/test_reasoning.py +81 -0
- tests/common.py +73 -0
- tests/perf/test_perf.py +44 -14
- tests/rag/test_clip_benchmark.py +0 -3
- evalscope/api/mixin/dataset_mixin.py +0 -105
- evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
- tests/aigc/__init__.py +0 -1
- /evalscope/benchmarks/{aigc → ai2d}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/i2i → amc}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/t2i → healthbench}/__init__.py +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/LICENSE +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/WHEEL +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/entry_points.txt +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/top_level.txt +0 -0
- /tests/{aigc → benchmark}/test_t2i.py +0 -0

evalscope/api/benchmark/adapters/default_data_adapter.py
CHANGED
@@ -241,6 +241,7 @@ class DefaultDataAdapter(DataAdapter):
             filter_func=self.sample_filter,
             limit=self.limit if not self.reformat_subset else None,  # Limit number of samples if specified
             repeats=self.repeats,  # Number of repetitions for each sample
+            shuffle=self.shuffle,  # Shuffle dataset if enabled
             shuffle_choices=self.shuffle_choices,  # Shuffle choices if requested
             data_source=self.dataset_hub,  # Data source configuration
         )
@@ -641,9 +642,7 @@ class DefaultDataAdapter(DataAdapter):
         """
         pass

-    def _on_generate_report(
-        self, scores: Dict[str, List[AggScore]], model_name: str, add_aggregation_name: bool = True
-    ) -> Report:
+    def _on_generate_report(self, scores: Dict[str, List[AggScore]], model_name: str) -> Report:
         """
         Hook method called during report generation.

@@ -659,7 +658,7 @@ class DefaultDataAdapter(DataAdapter):
             Report: The generated evaluation report
         """
         return ReportGenerator.generate_report(
-            score_dict=scores, model_name=model_name, data_adapter=self, add_aggregation_name=add_aggregation_name
+            score_dict=scores, model_name=model_name, data_adapter=self, add_aggregation_name=self.add_aggregation_name
         )

    @override
@@ -681,3 +680,7 @@ class DefaultDataAdapter(DataAdapter):
         report = self._on_generate_report(scores, model_name=model_name)
         self._on_generate_report_end(report, output_dir, **kwargs)
         return report
+
+    def finalize(self, *args, **kwargs):
+        # Finalize the evaluation process
+        self.sandbox_finalize(*args, **kwargs)
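Note: in 1.0.2 the report hook reads `self.add_aggregation_name` instead of taking it as an argument, and `finalize()` delegates cleanup to the new sandbox mixin. A minimal sketch of how a subclass might rely on this; the import path and the call site are assumptions, not shown in this diff:

```python
# Hypothetical sketch, not taken from the package source.
from evalscope.api.benchmark.adapters import DefaultDataAdapter  # import path assumed


class MyAdapter(DefaultDataAdapter):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Previously this required overriding _on_generate_report(..., add_aggregation_name=False);
        # now the flag is a plain attribute read by the base class.
        self.add_aggregation_name = False


# After a run, the framework (or the caller) is expected to invoke:
#   adapter.finalize()   # DefaultDataAdapter forwards this to SandboxMixin.sandbox_finalize()
```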

evalscope/api/benchmark/adapters/image_edit_adapter.py
ADDED
@@ -0,0 +1,82 @@
+import os
+from typing import Optional
+
+from evalscope.constants import EvalType, FileConstants
+from evalscope.utils import get_logger
+from evalscope.utils.function_utils import thread_safe
+from evalscope.utils.io_utils import jsonl_to_list
+from .text2image_adapter import Text2ImageAdapter
+
+logger = get_logger()
+
+
+class ImageEditAdapter(Text2ImageAdapter):
+    """
+    Support two methods:
+    1. Inference using modelscope pipeline
+    2. Load local inference jsonl file with key to corresponding prompt
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.local_file = self.extra_params.get('local_file', None)
+        self.id_key = self.extra_params.get('id_key', FileConstants.ID)
+        self.image_key = self.extra_params.get('image_key', FileConstants.IMAGE_PATH)
+        self.local_data = self.load_local_file()
+
+    def load_local_file(self) -> Optional[dict]:
+        if not self.local_file:
+            return None
+
+        # Load file and check
+        data_list = jsonl_to_list(self.local_file)
+        data_dict = {}
+        for record in data_list:
+            if self.image_key not in record:
+                raise ValueError(f"Image key '{self.image_key}' not found in record: {record}, file {self.local_file}")
+            if self.id_key not in record:
+                raise ValueError(f"ID key '{self.id_key}' not found in record: {record}, file {self.local_file}")
+
+            image_path = record[self.image_key]
+            if not os.path.isabs(image_path):
+                image_path = os.path.join(os.path.dirname(self.local_file), image_path)
+            if not os.path.exists(image_path):
+                raise FileNotFoundError(f"Image file '{image_path}' not found.")
+
+            data_dict[record[self.id_key]] = record
+        return data_dict
+
+    def get_image_path_from_id(self, image_id) -> Optional[str]:
+        if not self.local_file:
+            return None
+
+        record = self.local_data.get(image_id)
+        if not record:
+            return None
+
+        return record[self.image_key]
+
+    def _post_process_samples(self):
+        super()._post_process_samples()
+
+        # Add local image path if exists
+        for subset in self.test_dataset.keys():
+            for sample in self.test_dataset[subset]:
+                local_image_path = self.get_image_path_from_id(sample.metadata.get(FileConstants.ID))
+                if local_image_path:
+                    sample.metadata[FileConstants.IMAGE_PATH] = local_image_path
+
+    def sample_filter(self, sample) -> bool:
+        """
+        Filter samples based on metadata availability.
+        If local file is not available, all samples are considered valid.
+        Otherwise, only samples with valid metadata and image path are kept.
+        """
+        if not self.local_data:
+            return True
+        else:
+            sample_id = sample.metadata.get(FileConstants.ID)
+            if (not sample_id) or (not self.get_image_path_from_id(sample_id)):
+                return False
+            return True
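Note: besides generating edits through a model, the new `ImageEditAdapter` can score pre-generated images from a local JSONL file passed via `extra_params`. A sketch of what that file and configuration could look like; the key names below are illustrative (the defaults come from `FileConstants.ID` / `FileConstants.IMAGE_PATH`), and the exact task wiring is not shown in this diff:

```python
# Hypothetical sketch: preparing a local results file for ImageEditAdapter.
import json

records = [
    {'sample_id': 'gedit_0001', 'edited_image': 'images/gedit_0001.png'},
    {'sample_id': 'gedit_0002', 'edited_image': 'images/gedit_0002.png'},
]
with open('outputs/results.jsonl', 'w') as f:
    for rec in records:
        f.write(json.dumps(rec) + '\n')

# Passed to the benchmark through extra_params:
extra_params = {
    'local_file': 'outputs/results.jsonl',
    'id_key': 'sample_id',       # defaults to FileConstants.ID
    'image_key': 'edited_image', # defaults to FileConstants.IMAGE_PATH
}
```

Relative image paths are resolved against the JSONL file's directory and must exist, otherwise `load_local_file` raises; samples whose id has no matching record are dropped by `sample_filter`.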

evalscope/api/benchmark/adapters/multi_choice_adapter.py
CHANGED
@@ -18,8 +18,11 @@ class MultiChoiceAdapter(DefaultDataAdapter):
     This adapter formats the input for multi-choice questions and handles few-shot examples.
     """

-
-
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.multiple_correct: bool = False
+        """Whether the benchmark allows multiple correct answers."""

     def format_prompt_template(self, sample: Sample) -> str:
         """
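Note: `MultiChoiceAdapter` now initializes a `multiple_correct` flag that concrete benchmarks can flip. A minimal sketch (import path assumed):

```python
from evalscope.api.benchmark.adapters import MultiChoiceAdapter  # import path assumed


class MultiSelectBenchmarkAdapter(MultiChoiceAdapter):
    """Hypothetical adapter for a benchmark where several options can be correct."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.multiple_correct = True  # defaults to False in the base class
```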

evalscope/api/benchmark/adapters/text2image_adapter.py
CHANGED
@@ -8,7 +8,7 @@ from evalscope.api.messages.content import ContentImage
 from evalscope.api.metric import Score
 from evalscope.api.model import ChatCompletionChoice, Model, ModelOutput
 from evalscope.api.registry import get_metric
-from evalscope.constants import EvalType
+from evalscope.constants import EvalType, FileConstants
 from evalscope.utils import get_logger
 from evalscope.utils.function_utils import thread_safe
 from .default_data_adapter import DefaultDataAdapter
@@ -19,6 +19,11 @@ logger = get_logger()
 class Text2ImageAdapter(DefaultDataAdapter):
     """Text to Image Adapter for benchmarks."""

+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.add_aggregation_name = False  # Do not add aggregation name in the report by default
+
     def load_from_disk(self, **kwargs):
         return super().load_from_disk(use_local_loader=True)

@@ -27,11 +32,12 @@ class Text2ImageAdapter(DefaultDataAdapter):
         return Sample(
             input=[ChatMessageUser(content=record['prompt'])],
             metadata={
-                'id': record['id'],
                 'prompt': record['prompt'],
                 'category': record.get('category', ''),
                 'tags': record.get('tags', []),
-
+                FileConstants.ID: record[FileConstants.ID],
+                FileConstants.IMAGE_PATH: record.get(FileConstants.IMAGE_PATH,
+                                                     ''),  # Optional field for existing image path
             }
         )

@@ -83,7 +89,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
                 completed=True,
             )
         else:
-            image_id = f
+            image_id = f'{sample.metadata.get(FileConstants.ID, sample.id)}_{sample.group_id}'
             output_path = os.path.join(output_dir, 'images', f'{image_id}.png')
             if not os.path.exists(os.path.dirname(output_path)):
                 os.makedirs(os.path.dirname(output_path))
@@ -96,7 +102,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
             with open(output_path, 'wb') as f:
                 f.write(base64.b64decode(image_base64))

-            sample.metadata[
+            sample.metadata[FileConstants.IMAGE_PATH] = output_path
             return TaskState(
                 model=model.name,
                 sample=sample,
@@ -111,7 +117,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
         self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
     ) -> Score:
         # Get prediction and prompt from task state
-        image_path = task_state.metadata.get(
+        image_path = task_state.metadata.get(FileConstants.IMAGE_PATH, original_prediction)
         prompt = task_state.input[0].content
         meta = task_state.metadata

@@ -149,7 +155,3 @@ class Text2ImageAdapter(DefaultDataAdapter):
            score.metadata[metric_name] = f'error: {str(e)}'

         return score
-
-    def _on_generate_report(self, scores, model_name, add_aggregation_name=True):
-        # Don't add aggregation name for needle haystack adapter
-        return super()._on_generate_report(scores, model_name, False)
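Note: with these changes the generated image is written to `<output_dir>/images/<image_id>.png`, where the id combines the sample's `FileConstants.ID` (falling back to `sample.id`) and its `group_id`; the saved path is stored in the sample metadata and read back in `match_score`. A self-contained sketch of that convention (function name and arguments are illustrative, not the package API):

```python
# Hypothetical restatement of the image-saving convention shown in the diff above.
import base64
import os


def save_generated_image(image_base64: str, sample_id: str, group_id: int, output_dir: str) -> str:
    image_id = f'{sample_id}_{group_id}'
    output_path = os.path.join(output_dir, 'images', f'{image_id}.png')
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, 'wb') as f:
        f.write(base64.b64decode(image_base64))
    return output_path  # stored in sample.metadata under FileConstants.IMAGE_PATH
```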

evalscope/api/benchmark/adapters/vision_language_adapter.py
ADDED
@@ -0,0 +1,8 @@
+from .default_data_adapter import DefaultDataAdapter
+
+
+class VisionLanguageAdapter(DefaultDataAdapter):
+    """Adapter for vision-language benchmarks. e.g., image captioning, visual question answering, etc."""
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)

evalscope/api/benchmark/benchmark.py
CHANGED
@@ -9,7 +9,7 @@ from evalscope.api.dataset import DatasetDict, Sample
 from evalscope.api.evaluator import TaskState
 from evalscope.api.filter import FilterEnsemble, build_filter_ensemble
 from evalscope.api.metric import AggScore, SampleScore
-from evalscope.api.mixin import LLMJudgeMixin
+from evalscope.api.mixin import LLMJudgeMixin, SandboxMixin
 from evalscope.api.model import Model
 from evalscope.report import Report
 from evalscope.utils.logger import get_logger
@@ -21,7 +21,7 @@ if TYPE_CHECKING:
 logger = get_logger()


-class DataAdapter(LLMJudgeMixin, ABC):
+class DataAdapter(LLMJudgeMixin, SandboxMixin, ABC):
     """
     Data Adapter for the benchmark.
     """
@@ -43,6 +43,12 @@ class DataAdapter(LLMJudgeMixin, ABC):
         self.save_metadata = True
         """Whether to save metadata in the review result"""

+        self.add_aggregation_name = True
+        """Whether to add aggregation name in the report"""
+
+        self.add_overall_metric = True
+        """Whether to add overall metric in the report"""
+
         self.category_map = {}
         """Category map for the benchmark"""

@@ -86,6 +92,11 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         pass

+    @abstractmethod
+    def finalize(self, *args, **kwargs) -> None:
+        """Finalize the evaluation process."""
+        pass
+
     @property
     def name(self) -> str:
         """
@@ -170,6 +181,13 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         return self._benchmark_meta.default_subset

+    @default_subset.setter
+    def default_subset(self, value: str):
+        """
+        Set the default subset of the benchmark.
+        """
+        self._benchmark_meta.default_subset = value
+
     @property
     def few_shot_num(self) -> int:
         """
@@ -299,6 +317,48 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         return self._task_config.seed

+    @property
+    def shuffle(self) -> bool:
+        """
+        Return whether to shuffle the dataset before evaluation.
+        """
+        return self._benchmark_meta.shuffle
+
+    @shuffle.setter
+    def shuffle(self, value: bool):
+        """
+        Set whether to shuffle the dataset before evaluation.
+        """
+        self._benchmark_meta.shuffle = value
+
+    @property
+    def shuffle_choices(self) -> bool:
+        """
+        Return whether to shuffle the choices in multiple-choice datasets.
+        """
+        return self._benchmark_meta.shuffle_choices
+
+    @shuffle_choices.setter
+    def shuffle_choices(self, value: bool):
+        """
+        Set whether to shuffle the choices in multiple-choice datasets.
+        """
+        self._benchmark_meta.shuffle_choices = value
+
+    @property
+    def review_timeout(self) -> Optional[float]:
+        """
+        Return the timeout for the review process.
+        """
+        return self._benchmark_meta.review_timeout
+
+    @review_timeout.setter
+    def review_timeout(self, value: float):
+        """
+        Set the timeout for the review process.
+        """
+        self._benchmark_meta.review_timeout = value
+
     @contextlib.contextmanager
     def _temporary_attribute(self, attr_name: str, new_value):
         """
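Note: `DataAdapter` now also mixes in `SandboxMixin`, declares `finalize()` as abstract, and proxies `shuffle`, `shuffle_choices` and `review_timeout` to the underlying `BenchmarkMeta`. A hedged sketch of how calling code might use the new knobs (import path assumed; nothing here is prescribed by the diff):

```python
from evalscope.api.benchmark import DataAdapter  # import path assumed


def configure_adapter(adapter: DataAdapter) -> None:
    """Illustrative only: flip the new 1.0.2 knobs before running an evaluation."""
    adapter.shuffle = True            # forwarded to adapter._benchmark_meta.shuffle
    adapter.shuffle_choices = True    # forwarded to adapter._benchmark_meta.shuffle_choices
    adapter.review_timeout = 30.0     # review timeout in seconds (defaults to None)


def teardown_adapter(adapter: DataAdapter) -> None:
    """finalize() is abstract here; DefaultDataAdapter implements it via sandbox_finalize()."""
    adapter.finalize()
```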

evalscope/api/benchmark/meta.py
CHANGED
@@ -73,6 +73,15 @@ class BenchmarkMeta:
     aggregation: str = 'mean'
     """ Aggregation function for the metrics. Default is 'mean'. Can be 'mean', 'pass@<k>' or a custom function name."""

+    shuffle: bool = False
+    """Whether to shuffle the dataset before evaluation."""
+
+    shuffle_choices: bool = False
+    """Whether to shuffle the choices in multiple-choice datasets."""
+
+    review_timeout: Optional[float] = None
+    """ Timeout for review in seconds."""
+
     extra_params: Dict = field(default_factory=dict)
     """ Additional parameters for the benchmark."""


evalscope/api/dataset/dataset.py
CHANGED
@@ -5,9 +5,8 @@ from dataclasses import dataclass, field
 from pydantic import BaseModel, Field
 from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Union

-from evalscope.api.messages import ChatMessage,
+from evalscope.api.messages import ChatMessage, messages_to_markdown
 from evalscope.api.tool import ToolInfo
-from evalscope.utils.multi_choices import answer_character, answer_index


 class Sample(BaseModel):
@@ -31,9 +30,6 @@ class Sample(BaseModel):
     tools: Optional[List[ToolInfo]] = None
     """List of tools available to the model during inference (optional)."""

-    category: Optional[str] = None
-    """Category of the sample (optional)."""
-
     subset_key: Optional[str] = None
     """Key for the subset this sample belongs to, used for generating subsets (optional)."""

@@ -54,7 +50,7 @@ class Sample(BaseModel):
         if isinstance(self.input, str):
             input_text = self.input
         else:
-            input_text =
+            input_text = messages_to_markdown(self.input, max_length=50)
         return f'Sample ID: {self.id}\nInput: {input_text}\nTarget: {self.target}'


@@ -230,6 +226,8 @@ class MemoryDataset(Dataset):
         self._shuffled = True

     def shuffle_choices(self, seed: Optional[int] = None) -> None:
+        from evalscope.utils.multi_choices import answer_character
+
         rand = random.Random(seed)
         for sample in self.samples:
             if not sample.choices:
@@ -249,6 +247,8 @@ class MemoryDataset(Dataset):
             sample.target = self._remap_target(sample.target, position_map=position_map)

     def _remap_target(self, target: Union[str, List[str]], position_map: Dict[int, str]) -> Union[str, List[str]]:
+        from evalscope.utils.multi_choices import answer_index
+
         if isinstance(target, list):
             return [position_map[answer_index(t)] for t in target]
         else:

evalscope/api/dataset/loader.py
CHANGED
@@ -126,7 +126,8 @@ class RemoteDataLoader(DataLoader):
             self.limit = int(len(dataset) * self.limit)
         elif isinstance(self.limit, int) and self.limit < 0:
             raise ValueError('Limit must be a non-negative integer or a float between 0 and 1.')
-        dataset
+        if len(dataset) > self.limit:
+            dataset = dataset.select(range(self.limit))

         # convert to list
         dataset = dataset.to_list()
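Note: in 1.0.2 the truncation only happens when the dataset is actually larger than the limit (the 1.0.0 line at this spot is cut off in this view). The limit semantics, restated as a self-contained sketch over a plain list (the real loader operates on a HuggingFace-style `Dataset` and uses `select`):

```python
# Hypothetical restatement of the limit handling; not the package implementation.
from typing import List, Optional, Union


def apply_limit(dataset: List[dict], limit: Union[int, float, None]) -> List[dict]:
    if limit is None:
        return dataset
    if isinstance(limit, float) and 0 <= limit <= 1:
        limit = int(len(dataset) * limit)          # fraction of the dataset
    elif isinstance(limit, int) and limit < 0:
        raise ValueError('Limit must be a non-negative integer or a float between 0 and 1.')
    if len(dataset) > limit:                       # 1.0.2 guards the truncation with this check
        dataset = dataset[:limit]
    return dataset


assert len(apply_limit([{'i': i} for i in range(10)], 0.3)) == 3
assert len(apply_limit([{'i': i} for i in range(10)], 20)) == 10  # limit larger than dataset: no-op
```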

evalscope/api/evaluator/cache.py
CHANGED
@@ -299,6 +299,15 @@ class ModelResult(BaseModel):
             completed=True,  # Mark as completed since it was cached
         )

+    def pretty_print(self) -> str:
+        """
+        Generate a pretty-printed string representation of the model result.
+
+        Returns:
+            A string representation of the model result
+        """
+        return self.model_dump_json(indent=2)
+

 class ReviewResult(BaseModel):
     """
@@ -340,7 +349,7 @@ class ReviewResult(BaseModel):

         return cls(
             index=state.sample_id,
-            input=state.
+            input=state.input_markdown,
             target=state.target,
             sample_score=sample_score,
         )
@@ -353,3 +362,17 @@ class ReviewResult(BaseModel):
             The sample score object
         """
         return self.sample_score
+
+    def pretty_print(self) -> str:
+        """
+        Generate a pretty-printed string representation of the review result.
+
+        Returns:
+            A string representation of the review result
+        """
+        output = [
+            f'Review Result for Sample {self.index}:',
+            f'Target: {self.target}',
+            f'Score: {self.sample_score.model_dump_json(indent=2)}',
+        ]
+        return '\n'.join(output)

evalscope/api/evaluator/state.py
CHANGED
@@ -3,7 +3,7 @@ from random import Random
 from typing import Any, Dict, List, Optional, Sequence, Union, overload

 from evalscope.api.dataset import Sample
-from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str
+from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str, messages_to_markdown
 from evalscope.api.model import ModelOutput


@@ -188,6 +188,17 @@ class TaskState:
         else:
             return messages_pretty_str(self._input)

+    @property
+    def input_markdown(self) -> str:
+        """Get the input text as markdown.
+
+        For multi-modal content, images will be represented in markdown format.
+        """
+        if isinstance(self._input, str):
+            return self._input
+        else:
+            return messages_to_markdown(self._input)
+
     @property
     def choices(self) -> Choices:
         """Choices for the sample, if applicable."""
@@ -262,3 +273,8 @@ class TaskState:
     def target(self) -> str:
         """The scoring target for this `Sample`."""
         return self._target.text
+
+    @target.setter
+    def target(self, text: str) -> None:
+        """Set the target for review purposes."""
+        self._target = Target(text)

evalscope/api/messages/__init__.py
CHANGED
@@ -6,6 +6,7 @@ from .chat_message import (
     ChatMessageUser,
     dict_to_chat_message,
     messages_pretty_str,
+    messages_to_markdown,
 )
 from .content import Content, ContentAudio, ContentData, ContentImage, ContentReasoning, ContentText, ContentVideo
 from .utils import parse_content_with_reasoning

evalscope/api/messages/chat_message.py
CHANGED
@@ -3,7 +3,7 @@ from pydantic import BaseModel, Field, JsonValue, model_validator
 from typing import Any, Dict, List, Literal, Optional, Type, Union

 from evalscope.api.tool import ToolCall, ToolCallError
-from .content import Content, ContentReasoning, ContentText
+from .content import Content, ContentAudio, ContentImage, ContentReasoning, ContentText
 from .utils import parse_content_with_reasoning


@@ -184,7 +184,7 @@ def dict_to_chat_message(data: Dict[str, Any]) -> ChatMessage:


 def messages_pretty_str(messages: List[ChatMessage]) -> str:
-    """Pretty print a list of chat messages."""
+    """Pretty print a list of chat messages. Without images or other multi-modal contents."""
     output = []
     for message in messages:
         role = message.role.capitalize()
@@ -196,3 +196,53 @@ def messages_pretty_str(messages: List[ChatMessage]) -> str:
             content += f'\nFunction: {message.function}'
         output.append(f'**{role}**: {content}')
     return '\n\n'.join(output)
+
+
+def messages_to_markdown(messages: List[ChatMessage], max_length: Optional[int] = None) -> str:
+    """Convert a list of chat messages to markdown format.
+
+    Args:
+        messages (List[ChatMessage]): The list of chat messages to convert.
+        max_length (Optional[int]): If provided, truncates the base64 string of images to this length.
+    """
+    output = []
+    for message in messages:
+        role = message.role.capitalize()
+
+        # Start with role header
+        content_parts = [f'**{role}**: ']
+
+        # Handle content based on type
+        if isinstance(message.content, str):
+            content_parts.append(message.content)
+        else:
+            for content_item in message.content:
+                if isinstance(content_item, ContentText):
+                    content_parts.append(content_item.text)
+                elif isinstance(content_item, ContentImage):
+                    # Use markdown image syntax
+                    image_base64 = content_item.image
+                    if max_length and len(image_base64) > max_length:
+                        image_base64 = image_base64[:max_length]
+                    content_parts.append(f'![image]({image_base64})')
+                elif isinstance(content_item, ContentAudio):
+                    audio_base64 = content_item.audio
+                    if max_length and len(audio_base64) > max_length:
+                        audio_base64 = audio_base64[:max_length]
+                    content_parts.append(f"<audio controls src='{audio_base64}'></audio>")
+                elif isinstance(content_item, ContentReasoning):
+                    content_parts.append(f'**Reasoning:** {content_item.reasoning}')

+        # Add tool-specific information
+        if isinstance(message, ChatMessageTool):
+            if message.error:
+                content_parts.append(f'**Error:** {message.error.message}')
+            if message.function:
+                content_parts.append(f'**Function:** {message.function}')
+        elif isinstance(message, ChatMessageAssistant) and message.tool_calls:
+            for tool_call in message.tool_calls:
+                content_parts.append(f'**Tool Call:** {tool_call.function}')
+
+        output.append('\n'.join(content_parts))
+
+    return '\n\n'.join(output)
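Note: a hypothetical usage sketch of the new `messages_to_markdown` helper. Constructing `ContentText` / `ContentImage` via keyword fields matches the attributes the helper reads (`text`, `image`), but the exact constructors are an assumption, and the base64 string below is a placeholder:

```python
from evalscope.api.messages import ChatMessageUser, messages_to_markdown
from evalscope.api.messages.content import ContentImage, ContentText

messages = [
    ChatMessageUser(content=[
        ContentText(text='Describe this picture.'),
        ContentImage(image='data:image/png;base64,iVBORw0KGgoAAA...'),  # placeholder data URL
    ])
]

# Renders a '**User**:' header, the text, and a markdown image tag whose
# base64 payload is truncated to 50 characters.
print(messages_to_markdown(messages, max_length=50))
```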

evalscope/api/metric/scorer.py
CHANGED
@@ -35,20 +35,28 @@ class Score(BaseModel):
         """Main score value."""
         if self.main_score_name and self.main_score_name in self.value:
             return self.value[self.main_score_name]
-
+        elif self.value:
+            # If main_score_name is not set or not found, use the first value and update main_score_name
+            first_key = next(iter(self.value))
+            self.main_score_name = first_key
+            return self.value[first_key]
+        return None

     @main_value.setter
     def main_value(self, value: Union[int, float, bool]):
         """Set the main score value."""
         if self.main_score_name:
+            # If main_score_name is already set, use it
             self.value[self.main_score_name] = value
+        elif self.value:
+            # If no main_score_name but value dict exists, use the first key
+            first_key = next(iter(self.value))
+            self.main_score_name = first_key
+            self.value[first_key] = value
         else:
-            # If
-
-
-            self.value[first_key] = value
-            else:
-                self.value['default'] = value
+            # If neither main_score_name nor value dict exists, initialize both
+            self.main_score_name = 'default'
+            self.value[self.main_score_name] = value


 class SampleScore(BaseModel):
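Note: a hedged sketch of the new `Score.main_value` fallback behaviour, assuming `Score` can be constructed with just its `value` dict (other fields are expected to have defaults):

```python
from evalscope.api.metric import Score

score = Score(value={'rouge-1': 0.41, 'rouge-l': 0.38})   # no main_score_name given

print(score.main_value)       # 0.41 -> falls back to the first key in `value`
print(score.main_score_name)  # 'rouge-1' -> and records that key as the main score name

empty = Score(value={})
empty.main_value = 1.0        # neither a name nor values yet:
print(empty.value)            # {'default': 1.0}, with main_score_name == 'default'
```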
evalscope/api/mixin/__init__.py
CHANGED