evalscope 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/api/benchmark/__init__.py +1 -1
- evalscope/api/benchmark/adapters/__init__.py +2 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +1 -0
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +7 -6
- evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
- evalscope/api/benchmark/benchmark.py +35 -0
- evalscope/api/benchmark/meta.py +6 -0
- evalscope/api/dataset/dataset.py +6 -6
- evalscope/api/dataset/loader.py +2 -1
- evalscope/api/evaluator/cache.py +24 -1
- evalscope/api/evaluator/state.py +12 -1
- evalscope/api/messages/__init__.py +1 -0
- evalscope/api/messages/chat_message.py +47 -2
- evalscope/api/metric/scorer.py +15 -7
- evalscope/api/mixin/__init__.py +0 -1
- evalscope/api/model/generate_config.py +1 -3
- evalscope/api/model/model.py +4 -1
- evalscope/app/app.py +3 -0
- evalscope/app/ui/single_model.py +3 -3
- evalscope/app/utils/data_utils.py +7 -7
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/text_utils.py +14 -12
- evalscope/arguments.py +2 -4
- evalscope/backend/opencompass/backend_manager.py +0 -2
- evalscope/backend/rag_eval/utils/embedding.py +9 -1
- evalscope/benchmarks/bfcl/bfcl_adapter.py +2 -6
- evalscope/benchmarks/bfcl/generation.py +2 -2
- evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
- evalscope/benchmarks/data_collection/data_collection_adapter.py +23 -19
- evalscope/benchmarks/frames/frames_adapter.py +2 -1
- evalscope/benchmarks/general_arena/general_arena_adapter.py +5 -1
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +5 -1
- evalscope/benchmarks/tau_bench/generation.py +1 -1
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +15 -19
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
- evalscope/cli/start_app.py +7 -1
- evalscope/cli/start_perf.py +7 -1
- evalscope/config.py +72 -13
- evalscope/constants.py +8 -0
- evalscope/evaluator/evaluator.py +6 -4
- evalscope/metrics/llm_judge.py +19 -7
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/model_apis.py +20 -0
- evalscope/models/openai_compatible.py +3 -0
- evalscope/models/text2image_model.py +2 -2
- evalscope/models/utils/openai.py +7 -4
- evalscope/perf/benchmark.py +2 -0
- evalscope/perf/utils/benchmark_util.py +8 -5
- evalscope/perf/utils/local_server.py +3 -0
- evalscope/report/__init__.py +0 -1
- evalscope/report/generator.py +8 -87
- evalscope/run.py +9 -5
- evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- evalscope/utils/chat_service.py +1 -1
- evalscope/utils/import_utils.py +23 -1
- evalscope/utils/io_utils.py +42 -1
- evalscope/utils/model_utils.py +4 -3
- evalscope/utils/multi_choices.py +23 -6
- evalscope/version.py +2 -2
- {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/METADATA +12 -15
- {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/RECORD +94 -80
- tests/benchmark/test_eval.py +30 -31
- tests/benchmark/test_image_edit.py +65 -0
- tests/benchmark/test_vlm.py +80 -0
- tests/cli/test_all.py +83 -43
- tests/cli/test_collection.py +8 -5
- tests/cli/test_reasoning.py +81 -0
- tests/common.py +73 -0
- tests/perf/test_perf.py +4 -2
- tests/rag/test_clip_benchmark.py +0 -3
- evalscope/api/mixin/dataset_mixin.py +0 -105
- evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
- tests/aigc/__init__.py +0 -1
- /evalscope/benchmarks/{aigc → image_edit}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/i2i → image_edit/gedit}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/t2i → math_vista}/__init__.py +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/LICENSE +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/WHEEL +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/entry_points.txt +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/top_level.txt +0 -0
- /tests/{aigc → benchmark}/test_t2i.py +0 -0
evalscope/api/benchmark/adapters/default_data_adapter.py
CHANGED
@@ -241,6 +241,7 @@ class DefaultDataAdapter(DataAdapter):
             filter_func=self.sample_filter,
             limit=self.limit if not self.reformat_subset else None,  # Limit number of samples if specified
             repeats=self.repeats,  # Number of repetitions for each sample
+            shuffle=self.shuffle,  # Shuffle dataset if enabled
             shuffle_choices=self.shuffle_choices,  # Shuffle choices if requested
             data_source=self.dataset_hub,  # Data source configuration
         )
evalscope/api/benchmark/adapters/image_edit_adapter.py
ADDED
@@ -0,0 +1,82 @@
+import os
+from typing import Optional
+
+from evalscope.constants import EvalType, FileConstants
+from evalscope.utils import get_logger
+from evalscope.utils.function_utils import thread_safe
+from evalscope.utils.io_utils import jsonl_to_list
+from .text2image_adapter import Text2ImageAdapter
+
+logger = get_logger()
+
+
+class ImageEditAdapter(Text2ImageAdapter):
+    """
+    Support two methods:
+    1. Inference using modelscope pipeline
+    2. Load local inference jsonl file with key to corresponding prompt
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.local_file = self.extra_params.get('local_file', None)
+        self.id_key = self.extra_params.get('id_key', FileConstants.ID)
+        self.image_key = self.extra_params.get('image_key', FileConstants.IMAGE_PATH)
+        self.local_data = self.load_local_file()
+
+    def load_local_file(self) -> Optional[dict]:
+        if not self.local_file:
+            return None
+
+        # Load file and check
+        data_list = jsonl_to_list(self.local_file)
+        data_dict = {}
+        for record in data_list:
+            if self.image_key not in record:
+                raise ValueError(f"Image key '{self.image_key}' not found in record: {record}, file {self.local_file}")
+            if self.id_key not in record:
+                raise ValueError(f"ID key '{self.id_key}' not found in record: {record}, file {self.local_file}")
+
+            image_path = record[self.image_key]
+            if not os.path.isabs(image_path):
+                image_path = os.path.join(os.path.dirname(self.local_file), image_path)
+            if not os.path.exists(image_path):
+                raise FileNotFoundError(f"Image file '{image_path}' not found.")
+
+            data_dict[record[self.id_key]] = record
+        return data_dict
+
+    def get_image_path_from_id(self, image_id) -> Optional[str]:
+        if not self.local_file:
+            return None
+
+        record = self.local_data.get(image_id)
+        if not record:
+            return None
+
+        return record[self.image_key]
+
+    def _post_process_samples(self):
+        super()._post_process_samples()
+
+        # Add local image path if exists
+        for subset in self.test_dataset.keys():
+            for sample in self.test_dataset[subset]:
+                local_image_path = self.get_image_path_from_id(sample.metadata.get(FileConstants.ID))
+                if local_image_path:
+                    sample.metadata[FileConstants.IMAGE_PATH] = local_image_path
+
+    def sample_filter(self, sample) -> bool:
+        """
+        Filter samples based on metadata availability.
+        If local file is not available, all samples are considered valid.
+        Otherwise, only samples with valid metadata and image path are kept.
+        """
+        if not self.local_data:
+            return True
+        else:
+            sample_id = sample.metadata.get(FileConstants.ID)
+            if (not sample_id) or (not self.get_image_path_from_id(sample_id)):
+                return False
+            return True
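The new ImageEditAdapter reads its configuration from `extra_params`. As a rough, hypothetical sketch (the benchmark name, field names, and top-level imports are illustrative, not taken from this diff), a local-inference JSONL file and a matching task config might look like this:

```python
# results.jsonl -- one record per pre-generated edited image; the id field must
# match the sample id, and image_path may be absolute or relative to the JSONL file.
# {"id": "sample_001", "image_path": "images/sample_001.png"}

from evalscope import TaskConfig, run_task  # assumed top-level re-exports

task = TaskConfig(
    model='dummy',              # model inference is skipped when local results are supplied
    datasets=['gedit'],         # hypothetical image-edit benchmark name
    dataset_args={
        'gedit': {
            'extra_params': {
                'local_file': 'results.jsonl',  # consumed by ImageEditAdapter.load_local_file
                'id_key': 'id',
                'image_key': 'image_path',
            }
        }
    },
)
run_task(task)
```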
evalscope/api/benchmark/adapters/text2image_adapter.py
CHANGED
@@ -8,7 +8,7 @@ from evalscope.api.messages.content import ContentImage
 from evalscope.api.metric import Score
 from evalscope.api.model import ChatCompletionChoice, Model, ModelOutput
 from evalscope.api.registry import get_metric
-from evalscope.constants import EvalType
+from evalscope.constants import EvalType, FileConstants
 from evalscope.utils import get_logger
 from evalscope.utils.function_utils import thread_safe
 from .default_data_adapter import DefaultDataAdapter
@@ -27,11 +27,12 @@ class Text2ImageAdapter(DefaultDataAdapter):
         return Sample(
             input=[ChatMessageUser(content=record['prompt'])],
             metadata={
-                'id': record['id'],
                 'prompt': record['prompt'],
                 'category': record.get('category', ''),
                 'tags': record.get('tags', []),
-
+                FileConstants.ID: record[FileConstants.ID],
+                FileConstants.IMAGE_PATH: record.get(FileConstants.IMAGE_PATH,
+                                                     ''),  # Optional field for existing image path
             }
         )
@@ -83,7 +84,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
                 completed=True,
             )
         else:
-            image_id = f
+            image_id = f'{sample.metadata.get(FileConstants.ID, sample.id)}_{sample.group_id}'
             output_path = os.path.join(output_dir, 'images', f'{image_id}.png')
             if not os.path.exists(os.path.dirname(output_path)):
                 os.makedirs(os.path.dirname(output_path))
@@ -96,7 +97,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
             with open(output_path, 'wb') as f:
                 f.write(base64.b64decode(image_base64))
 
-            sample.metadata[
+            sample.metadata[FileConstants.IMAGE_PATH] = output_path
         return TaskState(
             model=model.name,
             sample=sample,
@@ -111,7 +112,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
         self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
     ) -> Score:
         # Get prediction and prompt from task state
-        image_path = task_state.metadata.get(
+        image_path = task_state.metadata.get(FileConstants.IMAGE_PATH, original_prediction)
         prompt = task_state.input[0].content
         meta = task_state.metadata
 
evalscope/api/benchmark/benchmark.py
CHANGED
@@ -170,6 +170,13 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         return self._benchmark_meta.default_subset
 
+    @default_subset.setter
+    def default_subset(self, value: str):
+        """
+        Set the default subset of the benchmark.
+        """
+        self._benchmark_meta.default_subset = value
+
     @property
     def few_shot_num(self) -> int:
         """
@@ -299,6 +306,34 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         return self._task_config.seed
 
+    @property
+    def shuffle(self) -> bool:
+        """
+        Return whether to shuffle the dataset before evaluation.
+        """
+        return self._benchmark_meta.shuffle
+
+    @shuffle.setter
+    def shuffle(self, value: bool):
+        """
+        Set whether to shuffle the dataset before evaluation.
+        """
+        self._benchmark_meta.shuffle = value
+
+    @property
+    def shuffle_choices(self) -> bool:
+        """
+        Return whether to shuffle the choices in multiple-choice datasets.
+        """
+        return self._benchmark_meta.shuffle_choices
+
+    @shuffle_choices.setter
+    def shuffle_choices(self, value: bool):
+        """
+        Set whether to shuffle the choices in multiple-choice datasets.
+        """
+        self._benchmark_meta.shuffle_choices = value
+
     @contextlib.contextmanager
     def _temporary_attribute(self, attr_name: str, new_value):
         """
evalscope/api/benchmark/meta.py
CHANGED
@@ -73,6 +73,12 @@ class BenchmarkMeta:
     aggregation: str = 'mean'
     """ Aggregation function for the metrics. Default is 'mean'. Can be 'mean', 'pass@<k>' or a custom function name."""
 
+    shuffle: bool = False
+    """Whether to shuffle the dataset before evaluation."""
+
+    shuffle_choices: bool = False
+    """Whether to shuffle the choices in multiple-choice datasets."""
+
     extra_params: Dict = field(default_factory=dict)
     """ Additional parameters for the benchmark."""
 
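Together with the DataAdapter properties added above, these two flags can be flipped per benchmark. A minimal sketch, assuming the usual `dataset_args` plumbing forwards these keys onto `BenchmarkMeta` (not verified against this diff; model and benchmark names are illustrative):

```python
from evalscope import TaskConfig  # assumed top-level re-export

task = TaskConfig(
    model='qwen2.5',               # illustrative model id
    datasets=['ceval'],            # illustrative multiple-choice benchmark
    dataset_args={
        'ceval': {
            'shuffle': True,           # shuffle samples before evaluation
            'shuffle_choices': True,   # shuffle the answer options per sample
        }
    },
)
```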
evalscope/api/dataset/dataset.py
CHANGED
@@ -5,9 +5,8 @@ from dataclasses import dataclass, field
 from pydantic import BaseModel, Field
 from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Union
 
-from evalscope.api.messages import ChatMessage,
+from evalscope.api.messages import ChatMessage, messages_to_markdown
 from evalscope.api.tool import ToolInfo
-from evalscope.utils.multi_choices import answer_character, answer_index
 
 
 class Sample(BaseModel):
@@ -31,9 +30,6 @@ class Sample(BaseModel):
     tools: Optional[List[ToolInfo]] = None
     """List of tools available to the model during inference (optional)."""
 
-    category: Optional[str] = None
-    """Category of the sample (optional)."""
-
     subset_key: Optional[str] = None
     """Key for the subset this sample belongs to, used for generating subsets (optional)."""
 
@@ -54,7 +50,7 @@ class Sample(BaseModel):
         if isinstance(self.input, str):
             input_text = self.input
         else:
-            input_text =
+            input_text = messages_to_markdown(self.input, max_length=50)
         return f'Sample ID: {self.id}\nInput: {input_text}\nTarget: {self.target}'
 
 
@@ -230,6 +226,8 @@ class MemoryDataset(Dataset):
         self._shuffled = True
 
     def shuffle_choices(self, seed: Optional[int] = None) -> None:
+        from evalscope.utils.multi_choices import answer_character
+
         rand = random.Random(seed)
         for sample in self.samples:
             if not sample.choices:
@@ -249,6 +247,8 @@ class MemoryDataset(Dataset):
         sample.target = self._remap_target(sample.target, position_map=position_map)
 
     def _remap_target(self, target: Union[str, List[str]], position_map: Dict[int, str]) -> Union[str, List[str]]:
+        from evalscope.utils.multi_choices import answer_index
+
         if isinstance(target, list):
             return [position_map[answer_index(t)] for t in target]
         else:
evalscope/api/dataset/loader.py
CHANGED
@@ -126,7 +126,8 @@ class RemoteDataLoader(DataLoader):
             self.limit = int(len(dataset) * self.limit)
         elif isinstance(self.limit, int) and self.limit < 0:
             raise ValueError('Limit must be a non-negative integer or a float between 0 and 1.')
-        dataset
+        if len(dataset) > self.limit:
+            dataset = dataset.select(range(self.limit))
 
         # convert to list
         dataset = dataset.to_list()
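The reworked limit handling first converts a fractional limit into an absolute count and only then truncates, so a limit larger than the dataset is a no-op. A small illustration of the same arithmetic (plain Python, not the evalscope loader itself):

```python
def apply_limit(n_rows: int, limit):
    """Mirrors the loader's limit logic: floats act as fractions, ints as absolute caps."""
    if isinstance(limit, float):
        limit = int(n_rows * limit)
    elif isinstance(limit, int) and limit < 0:
        raise ValueError('Limit must be a non-negative integer or a float between 0 and 1.')
    return min(n_rows, limit)

assert apply_limit(1000, 0.1) == 100   # keep 10% of the rows
assert apply_limit(1000, 50) == 50     # absolute cap
assert apply_limit(30, 100) == 30      # cap larger than the dataset leaves it unchanged
```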
evalscope/api/evaluator/cache.py
CHANGED
@@ -299,6 +299,15 @@ class ModelResult(BaseModel):
             completed=True,  # Mark as completed since it was cached
         )
 
+    def pretty_print(self) -> str:
+        """
+        Generate a pretty-printed string representation of the model result.
+
+        Returns:
+            A string representation of the model result
+        """
+        return self.model_dump_json(indent=2)
+
 
 class ReviewResult(BaseModel):
     """
@@ -340,7 +349,7 @@ class ReviewResult(BaseModel):
 
         return cls(
            index=state.sample_id,
-            input=state.
+            input=state.input_markdown,
            target=state.target,
            sample_score=sample_score,
        )
@@ -353,3 +362,17 @@ class ReviewResult(BaseModel):
             The sample score object
         """
         return self.sample_score
+
+    def pretty_print(self) -> str:
+        """
+        Generate a pretty-printed string representation of the review result.
+
+        Returns:
+            A string representation of the review result
+        """
+        output = [
+            f'Review Result for Sample {self.index}:',
+            f'Target: {self.target}',
+            f'Score: {self.sample_score.model_dump_json(indent=2)}',
+        ]
+        return '\n'.join(output)
evalscope/api/evaluator/state.py
CHANGED
@@ -3,7 +3,7 @@ from random import Random
 from typing import Any, Dict, List, Optional, Sequence, Union, overload
 
 from evalscope.api.dataset import Sample
-from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str
+from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str, messages_to_markdown
 from evalscope.api.model import ModelOutput
 
 
@@ -188,6 +188,17 @@ class TaskState:
         else:
             return messages_pretty_str(self._input)
 
+    @property
+    def input_markdown(self) -> str:
+        """Get the input text as markdown.
+
+        For multi-modal content, images will be represented in markdown format.
+        """
+        if isinstance(self._input, str):
+            return self._input
+        else:
+            return messages_to_markdown(self._input)
+
     @property
     def choices(self) -> Choices:
         """Choices for the sample, if applicable."""
evalscope/api/messages/__init__.py
CHANGED
@@ -6,6 +6,7 @@ from .chat_message import (
     ChatMessageUser,
     dict_to_chat_message,
     messages_pretty_str,
+    messages_to_markdown,
 )
 from .content import Content, ContentAudio, ContentData, ContentImage, ContentReasoning, ContentText, ContentVideo
 from .utils import parse_content_with_reasoning
evalscope/api/messages/chat_message.py
CHANGED
@@ -3,7 +3,7 @@ from pydantic import BaseModel, Field, JsonValue, model_validator
 from typing import Any, Dict, List, Literal, Optional, Type, Union
 
 from evalscope.api.tool import ToolCall, ToolCallError
-from .content import Content, ContentReasoning, ContentText
+from .content import Content, ContentImage, ContentReasoning, ContentText
 from .utils import parse_content_with_reasoning
 
 
@@ -184,7 +184,7 @@ def dict_to_chat_message(data: Dict[str, Any]) -> ChatMessage:
 
 
 def messages_pretty_str(messages: List[ChatMessage]) -> str:
-    """Pretty print a list of chat messages."""
+    """Pretty print a list of chat messages. Without images or other multi-modal contents."""
     output = []
     for message in messages:
         role = message.role.capitalize()
@@ -196,3 +196,48 @@ def messages_pretty_str(messages: List[ChatMessage]) -> str:
             content += f'\nFunction: {message.function}'
         output.append(f'**{role}**: {content}')
     return '\n\n'.join(output)
+
+
+def messages_to_markdown(messages: List[ChatMessage], max_length: Optional[int] = None) -> str:
+    """Convert a list of chat messages to markdown format.
+
+    Args:
+        messages (List[ChatMessage]): The list of chat messages to convert.
+        max_length (Optional[int]): If provided, truncates the base64 string of images to this length.
+    """
+    output = []
+    for message in messages:
+        role = message.role.capitalize()
+
+        # Start with role header
+        content_parts = [f'**{role}**: ']
+
+        # Handle content based on type
+        if isinstance(message.content, str):
+            content_parts.append(message.content)
+        else:
+            for content_item in message.content:
+                if isinstance(content_item, ContentText):
+                    content_parts.append(content_item.text)
+                elif isinstance(content_item, ContentImage):
+                    # Use markdown image syntax
+                    image_base64 = content_item.image
+                    if max_length and len(image_base64) > max_length:
+                        image_base64 = image_base64[:max_length]
+                    content_parts.append(f'')
+                elif isinstance(content_item, ContentReasoning):
+                    content_parts.append(f'**Reasoning:** {content_item.reasoning}')
+
+        # Add tool-specific information
+        if isinstance(message, ChatMessageTool):
+            if message.error:
+                content_parts.append(f'**Error:** {message.error.message}')
+            if message.function:
+                content_parts.append(f'**Function:** {message.function}')
+        elif isinstance(message, ChatMessageAssistant) and message.tool_calls:
+            for tool_call in message.tool_calls:
+                content_parts.append(f'**Tool Call:** {tool_call.function}')
+
+        output.append('\n'.join(content_parts))
+
+    return '\n\n'.join(output)
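A quick sketch of how the new helper might be called (message construction is illustrative; the field names follow the ContentText/ContentImage classes referenced in this diff):

```python
from evalscope.api.messages import ChatMessageUser, messages_to_markdown
from evalscope.api.messages.content import ContentImage, ContentText

messages = [
    ChatMessageUser(content=[
        ContentText(text='Describe this picture.'),
        ContentImage(image='data:image/png;base64,iVBORw0KGgoAAAANSUhEUg...'),
    ])
]

# Unlike messages_pretty_str, the image payload is kept; max_length truncates the
# (potentially huge) base64 string so reports and cached reviews stay readable.
print(messages_to_markdown(messages, max_length=50))
```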
evalscope/api/metric/scorer.py
CHANGED
@@ -35,20 +35,28 @@ class Score(BaseModel):
         """Main score value."""
         if self.main_score_name and self.main_score_name in self.value:
             return self.value[self.main_score_name]
-
+        elif self.value:
+            # If main_score_name is not set or not found, use the first value and update main_score_name
+            first_key = next(iter(self.value))
+            self.main_score_name = first_key
+            return self.value[first_key]
+        return None
 
     @main_value.setter
     def main_value(self, value: Union[int, float, bool]):
         """Set the main score value."""
         if self.main_score_name:
+            # If main_score_name is already set, use it
             self.value[self.main_score_name] = value
+        elif self.value:
+            # If no main_score_name but value dict exists, use the first key
+            first_key = next(iter(self.value))
+            self.main_score_name = first_key
+            self.value[first_key] = value
         else:
-            # If
-
-
-            self.value[first_key] = value
-        else:
-            self.value['default'] = value
+            # If neither main_score_name nor value dict exists, initialize both
+            self.main_score_name = 'default'
+            self.value[self.main_score_name] = value
 
 
 class SampleScore(BaseModel):
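The reworked main_value property now falls back to the first entry of the value dict when main_score_name is missing or stale. A small sketch of the resulting behaviour (assuming Score can be constructed directly as a pydantic model, which this diff does not show explicitly):

```python
from evalscope.api.metric import Score  # import path taken from the adapter hunks above

score = Score(value={'acc': 0.75, 'f1': 0.6})

# No main_score_name was given, so the getter picks the first key ('acc')
# and records it as the main score name.
print(score.main_value)       # 0.75
print(score.main_score_name)  # 'acc'

empty = Score(value={})
empty.main_value = 1.0        # neither name nor values: the setter initialises 'default'
print(empty.value)            # {'default': 1.0}
```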
evalscope/api/mixin/__init__.py
CHANGED

evalscope/api/model/generate_config.py
CHANGED
@@ -25,9 +25,7 @@ class ResponseSchema(BaseModel):
 
 class GenerateConfig(BaseModel):
     """Model generation options."""
-
-    max_retries: Optional[int] = Field(default=None)
-    """Maximum number of times to retry request (defaults to unlimited)."""
+    model_config = {'extra': 'allow'}
 
     timeout: Optional[int] = Field(default=None)
     """Request timeout (in seconds)."""
evalscope/api/model/model.py
CHANGED
@@ -318,7 +318,7 @@ def get_model_with_task_config(task_config: 'TaskConfig') -> Model:
 
 @thread_safe
 def get_model(
-    model: str,
+    model: Union[str, Model, ModelAPI],
     eval_type: str,
     base_url: Optional[str] = None,
     api_key: Optional[str] = None,
@@ -346,6 +346,9 @@ def get_model(
     if isinstance(model, Model):
         return model
 
+    if isinstance(model, ModelAPI):
+        return Model(model, config, model_args)
+
     # see if we can return a memoized model instance
     # (exclude mockllm since custom_outputs is an infinite generator)
     model_cache_key: str = ''
evalscope/app/app.py
CHANGED
@@ -6,6 +6,7 @@ import argparse
 from evalscope.utils.logger import configure_logging
 from .arguments import add_argument
 from .ui import create_app_ui
+from .utils.env_utils import setup_env
 
 
 def create_app(args: argparse.Namespace):
@@ -17,6 +18,8 @@ def create_app(args: argparse.Namespace):
     """
     configure_logging(debug=args.debug)
 
+    setup_env(args)
+
     demo = create_app_ui(args)
 
     demo.launch(
evalscope/app/ui/single_model.py
CHANGED
@@ -198,9 +198,9 @@ def create_single_model_tab(sidebar: 'SidebarComponents', lang: str):
 
         # Process the data for display
         input_md = row['Input'] + '\n\n' + process_model_prediction(row['Metadata'])
-        generated_md =
-        gold_md =
-        pred_md =
+        generated_md = convert_markdown_image(row['Generated'])
+        gold_md = convert_markdown_image(row['Gold'])
+        pred_md = process_model_prediction(row['Pred'])
         score_md = process_json_content(row['Score'])
         nscore_val = float(row['NScore']) if not pd.isna(row['NScore']) else 0.0
 
evalscope/app/utils/data_utils.py
CHANGED
@@ -2,7 +2,6 @@
 Data loading and processing utilities for the Evalscope dashboard.
 """
 import glob
-import numpy as np
 import os
 import pandas as pd
 from typing import Any, Dict, List, Union
@@ -160,17 +159,18 @@ def get_model_prediction(work_dir: str, model_name: str, dataset_name: str, subs
         if f'{sample_dataset_name}/{sample_subset_name}' != subset_name:
             continue
 
-        prediction = sample_score.score.prediction
-        target = review_result.target
-        extracted_prediction = sample_score.score.extracted_prediction
         score = sample_score.score
+        metadata = sample_score.sample_metadata
+        prediction = score.prediction
+        target = review_result.target
+        extracted_prediction = score.extracted_prediction
         raw_d = {
             'Index': str(review_result.index),
             'Input': review_result.input.replace('\n', '\n\n'),  # for markdown
-            'Metadata':
-            'Generated': prediction
+            'Metadata': metadata,
+            'Generated': prediction,
             'Gold': target,
-            'Pred': extracted_prediction,
+            'Pred': extracted_prediction if extracted_prediction != prediction else '*Same as Generated*',
             'Score': score.model_dump(exclude_none=True),
             'NScore': normalize_score(score.main_value)
         }
evalscope/app/utils/env_utils.py
ADDED
@@ -0,0 +1,12 @@
+# flake8: noqa
+import os
+
+
+def setup_env(args):
+    compat_dsw_gradio(args)
+
+
+def compat_dsw_gradio(args) -> None:
+    if ('JUPYTER_NAME' in os.environ) and ('dsw-'
+                                           in os.environ['JUPYTER_NAME']) and ('GRADIO_ROOT_PATH' not in os.environ):
+        os.environ['GRADIO_ROOT_PATH'] = f"/{os.environ['JUPYTER_NAME']}/proxy/{args.server_port}"
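This helper only takes effect inside a DSW notebook environment. A minimal illustration of the resulting environment variable, using made-up values:

```python
import os
from argparse import Namespace

os.environ['JUPYTER_NAME'] = 'dsw-12345'   # hypothetical DSW instance name
os.environ.pop('GRADIO_ROOT_PATH', None)

# compat_dsw_gradio(Namespace(server_port=7860)) would then set:
# os.environ['GRADIO_ROOT_PATH'] == '/dsw-12345/proxy/7860'
```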
evalscope/app/utils/text_utils.py
CHANGED
@@ -2,11 +2,9 @@
 Text processing utilities for the Evalscope dashboard.
 """
 import json
-import numpy as np
 import os
-import pandas as pd
 import re
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
 
 from evalscope.utils.logger import get_logger
 from ..constants import LATEX_DELIMITERS
@@ -14,15 +12,19 @@ from ..constants import LATEX_DELIMITERS
 logger = get_logger()
 
 
-def convert_markdown_image(text):
-    if
-
-
-
-        text = os.path.abspath(text)
-        image_tag = f''
-        logger.debug(f'Converting image path to markdown: {text} -> {image_tag}')
+def convert_markdown_image(text: str):
+    if text.startswith('data:image'):
+        # Convert base64 image data to a markdown image tag
+        image_tag = f''
+        logger.debug(f'Converting base64 image data to markdown: {text[:30]}... -> {image_tag[:40]}...')
         return image_tag
+    elif os.path.isfile(text):
+        # Convert the image path to a markdown image tag
+        if text.endswith('.png') or text.endswith('.jpg') or text.endswith('.jpeg'):
+            text = os.path.abspath(text)
+            image_tag = f''
+            logger.debug(f'Converting image path to markdown: {text} -> {image_tag}')
+            return image_tag
     return text
 
 
@@ -85,7 +87,7 @@ def process_model_prediction_old(item: Any, max_length: int = 2048) -> str:
     return result
 
 
-def process_model_prediction(item: Any, max_length: int =
+def process_model_prediction(item: Any, max_length: Optional[int] = None) -> str:
     if isinstance(item, (dict, list)):
         result = json.dumps(item, ensure_ascii=False, indent=2)
         result = f'```json\n{result}\n```'
evalscope/arguments.py
CHANGED
@@ -2,7 +2,7 @@
 import argparse
 import json
 
-from evalscope.constants import EvalBackend, EvalType, JudgeStrategy, ModelTask
+from evalscope.constants import EvalBackend, EvalType, JudgeStrategy, ModelTask
 
 
 class ParseStrArgsAction(argparse.Action):
@@ -60,8 +60,7 @@ def add_argument(parser: argparse.ArgumentParser):
     parser.add_argument('--generation-config', type=str, action=ParseStrArgsAction, help='The generation config, should be a string.')  # noqa: E501
 
     # Evaluation-related arguments
-    parser.add_argument('--eval-type', type=str, help='The type for evaluating.'
-                        choices=[EvalType.CHECKPOINT, EvalType.CUSTOM, EvalType.SERVICE])
+    parser.add_argument('--eval-type', type=str, help='The type for evaluating.')
     parser.add_argument('--eval-backend', type=str, help='The evaluation backend to use.',
                         choices=[EvalBackend.NATIVE, EvalBackend.OPEN_COMPASS, EvalBackend.VLM_EVAL_KIT, EvalBackend.RAG_EVAL])  # noqa: E501
     parser.add_argument('--eval-config', type=str, required=False, help='The eval task config file path for evaluation backend.')  # noqa: E501
@@ -77,7 +76,6 @@ def add_argument(parser: argparse.ArgumentParser):
     # Debug and runtime mode arguments
     parser.add_argument('--ignore-errors', action='store_true', default=False, help='Ignore errors during evaluation.')
     parser.add_argument('--debug', action='store_true', default=False, help='Debug mode, will print information for debugging.')  # noqa: E501
-    parser.add_argument('--dry-run', action='store_true', default=False, help='Dry run in single processing mode.')
     parser.add_argument('--seed', type=int, default=42, help='Random seed for reproducibility.')
     parser.add_argument('--api-key', type=str, default='EMPTY', help='The API key for the remote API model.')
     parser.add_argument('--api-url', type=str, default=None, help='The API url for the remote API model.')