evalscope 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (97)
  1. evalscope/api/benchmark/__init__.py +1 -1
  2. evalscope/api/benchmark/adapters/__init__.py +2 -0
  3. evalscope/api/benchmark/adapters/default_data_adapter.py +1 -0
  4. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  5. evalscope/api/benchmark/adapters/text2image_adapter.py +7 -6
  6. evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
  7. evalscope/api/benchmark/benchmark.py +35 -0
  8. evalscope/api/benchmark/meta.py +6 -0
  9. evalscope/api/dataset/dataset.py +6 -6
  10. evalscope/api/dataset/loader.py +2 -1
  11. evalscope/api/evaluator/cache.py +24 -1
  12. evalscope/api/evaluator/state.py +12 -1
  13. evalscope/api/messages/__init__.py +1 -0
  14. evalscope/api/messages/chat_message.py +47 -2
  15. evalscope/api/metric/scorer.py +15 -7
  16. evalscope/api/mixin/__init__.py +0 -1
  17. evalscope/api/model/generate_config.py +1 -3
  18. evalscope/api/model/model.py +4 -1
  19. evalscope/app/app.py +3 -0
  20. evalscope/app/ui/single_model.py +3 -3
  21. evalscope/app/utils/data_utils.py +7 -7
  22. evalscope/app/utils/env_utils.py +12 -0
  23. evalscope/app/utils/text_utils.py +14 -12
  24. evalscope/arguments.py +2 -4
  25. evalscope/backend/opencompass/backend_manager.py +0 -2
  26. evalscope/backend/rag_eval/utils/embedding.py +9 -1
  27. evalscope/benchmarks/bfcl/bfcl_adapter.py +2 -6
  28. evalscope/benchmarks/bfcl/generation.py +2 -2
  29. evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
  30. evalscope/benchmarks/data_collection/data_collection_adapter.py +23 -19
  31. evalscope/benchmarks/frames/frames_adapter.py +2 -1
  32. evalscope/benchmarks/general_arena/general_arena_adapter.py +5 -1
  33. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  34. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  35. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  36. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  37. evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
  38. evalscope/benchmarks/mmmu/__init__.py +0 -0
  39. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  40. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  41. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
  42. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +5 -1
  43. evalscope/benchmarks/tau_bench/generation.py +1 -1
  44. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +15 -19
  45. evalscope/benchmarks/text2image/__init__.py +0 -0
  46. evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
  47. evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
  48. evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
  49. evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
  50. evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
  51. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
  52. evalscope/cli/start_app.py +7 -1
  53. evalscope/cli/start_perf.py +7 -1
  54. evalscope/config.py +72 -13
  55. evalscope/constants.py +8 -0
  56. evalscope/evaluator/evaluator.py +6 -4
  57. evalscope/metrics/llm_judge.py +19 -7
  58. evalscope/models/image_edit_model.py +125 -0
  59. evalscope/models/model_apis.py +20 -0
  60. evalscope/models/openai_compatible.py +3 -0
  61. evalscope/models/text2image_model.py +2 -2
  62. evalscope/models/utils/openai.py +7 -4
  63. evalscope/perf/benchmark.py +2 -0
  64. evalscope/perf/utils/benchmark_util.py +8 -5
  65. evalscope/perf/utils/local_server.py +3 -0
  66. evalscope/report/__init__.py +0 -1
  67. evalscope/report/generator.py +8 -87
  68. evalscope/run.py +9 -5
  69. evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  70. evalscope/utils/chat_service.py +1 -1
  71. evalscope/utils/import_utils.py +23 -1
  72. evalscope/utils/io_utils.py +42 -1
  73. evalscope/utils/model_utils.py +4 -3
  74. evalscope/utils/multi_choices.py +23 -6
  75. evalscope/version.py +2 -2
  76. {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/METADATA +12 -15
  77. {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/RECORD +94 -80
  78. tests/benchmark/test_eval.py +30 -31
  79. tests/benchmark/test_image_edit.py +65 -0
  80. tests/benchmark/test_vlm.py +80 -0
  81. tests/cli/test_all.py +83 -43
  82. tests/cli/test_collection.py +8 -5
  83. tests/cli/test_reasoning.py +81 -0
  84. tests/common.py +73 -0
  85. tests/perf/test_perf.py +4 -2
  86. tests/rag/test_clip_benchmark.py +0 -3
  87. evalscope/api/mixin/dataset_mixin.py +0 -105
  88. evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
  89. tests/aigc/__init__.py +0 -1
  90. /evalscope/benchmarks/{aigc → image_edit}/__init__.py +0 -0
  91. /evalscope/benchmarks/{aigc/i2i → image_edit/gedit}/__init__.py +0 -0
  92. /evalscope/benchmarks/{aigc/t2i → math_vista}/__init__.py +0 -0
  93. {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/LICENSE +0 -0
  94. {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/WHEEL +0 -0
  95. {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/entry_points.txt +0 -0
  96. {evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/top_level.txt +0 -0
  97. /tests/{aigc → benchmark}/test_t2i.py +0 -0
evalscope/report/generator.py CHANGED
@@ -8,105 +8,26 @@ from evalscope.report.report import *
 if TYPE_CHECKING:
     from evalscope.api.benchmark import DataAdapter
     from evalscope.api.metric import AggScore
-    from evalscope.benchmarks import DataAdapter as OldDataAdapter
 
 
 class ReportGenerator:
 
     @staticmethod
-    def gen_report(subset_score_map: dict, model_name: str, data_adapter: 'OldDataAdapter', **kwargs) -> Report:
-        """
-        Generate a report for a specific dataset based on provided subset scores.
-
-        Args:
-            subset_score_map (dict): A mapping from subset names to a list of score dictionaries.
-                {
-                    'subset_name': [
-                        {'metric_name': 'AverageAccuracy', 'score': 0.3389, 'num': 100},
-                        {'metric_name': 'WeightedAverageAccuracy', 'score': 0.3389, 'num': 100}
-                    ],
-                    ...
-                }
-            report_name (str): The name of the report to generate.
-            data_adapter (DataAdapter): An adapter object for data handling.
-
-        Returns:
-            Report: A structured report object containing metrics, categories, and subsets.
-
-        >>> report = gen_report(subset_score_map, "My Report", data_adapter, dataset_name="Dataset", model_name="Model")
-        """  # noqa: E501
-
-        dataset_name = data_adapter.name
-        category_map = data_adapter.category_map
-        report_name = f'{model_name}@{dataset_name}'
-
-        def flatten_subset() -> DataFrame:
-            """
-            Flatten subset score map to a DataFrame.
-
-            Example:
-                        name  score  num  categories      metric_name
-                0       ARC-Easy    0.5    2   [default]  AverageAccuracy
-                1  ARC-Challenge    0.5    2   [default]  AverageAccuracy
-            """
-            subsets = []
-            for subset_name, scores in subset_score_map.items():
-                for score_item in scores:
-                    categories = category_map.get(subset_name, ['default'])
-                    if isinstance(categories, str):
-                        categories = [categories]
-                    subsets.append(
-                        dict(
-                            name=subset_name,
-                            score=score_item['score'],
-                            num=score_item['num'],
-                            metric_name=score_item['metric_name'],
-                            categories=tuple(categories)
-                        )
-                    )
-            df = pd.DataFrame(subsets)
-            return df
-
-        df = flatten_subset()
-
+    def gen_collection_report(df: DataFrame, all_dataset_name: str, model_name: str) -> Report:
         metrics_list = []
-        for metric_name, group_metric in df.groupby('metric_name', sort=False):
+        for metric_name, group_metric in df.groupby('metric', sort=False):
             categories = []
             for category_name, group_category in group_metric.groupby('categories'):
                 subsets = []
-                for _, row in group_category.iterrows():
-                    subsets.append(Subset(name=row['name'], score=row['score'], num=row['num']))
-
+                for (dataset_name, subset_name), group_subset in group_category.groupby(['dataset_name',
+                                                                                          'subset_name']):
+                    avg_score = group_subset['score'].mean()
+                    num = group_subset['score'].count()
+                    subsets.append(Subset(name=f'{dataset_name}/{subset_name}', score=float(avg_score), num=int(num)))
                 categories.append(Category(name=category_name, subsets=subsets))
-
             metrics_list.append(Metric(name=metric_name, categories=categories))
-
-        report = Report(
-            name=report_name,
-            metrics=metrics_list,
-            dataset_name=dataset_name,
-            model_name=model_name,
-            dataset_description=data_adapter.description,
-            dataset_pretty_name=data_adapter.pretty_name
-        )
-        return report
-
-    @staticmethod
-    def gen_collection_report(df: DataFrame, all_dataset_name: str, model_name: str) -> Report:
-        categories = []
-        for category_name, group_category in df.groupby('categories'):
-            subsets = []
-            for (dataset_name, subset_name), group_subset in group_category.groupby(['dataset_name', 'subset_name']):
-                avg_score = group_subset['score'].mean()
-                num = group_subset['score'].count()
-                subsets.append(Subset(name=f'{dataset_name}/{subset_name}', score=float(avg_score), num=int(num)))
-
-            categories.append(Category(name=category_name, subsets=subsets))
         return Report(
-            name=DataCollection.NAME,
-            metrics=[Metric(name='Average', categories=categories)],
-            dataset_name=all_dataset_name,
-            model_name=model_name
+            name=DataCollection.NAME, metrics=metrics_list, dataset_name=all_dataset_name, model_name=model_name
        )
 
     @staticmethod
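For orientation, a minimal sketch of how the reworked gen_collection_report might be driven. It is illustrative only: the import path is an assumption, and the DataFrame columns ('metric', 'categories', 'dataset_name', 'subset_name', 'score') are inferred from the groupby calls in the hunk above rather than from documentation.

    import pandas as pd
    from evalscope.report import ReportGenerator  # assumed import path

    # One row per scored item; columns mirror the groupby keys used above.
    df = pd.DataFrame([
        {'metric': 'AverageAccuracy', 'categories': ('default',), 'dataset_name': 'arc',
         'subset_name': 'ARC-Easy', 'score': 1.0},
        {'metric': 'AverageAccuracy', 'categories': ('default',), 'dataset_name': 'arc',
         'subset_name': 'ARC-Easy', 'score': 0.0},
    ])
    # Each subset is reported as 'dataset/subset' with the mean score over its rows.
    report = ReportGenerator.gen_collection_report(df, all_dataset_name='arc', model_name='my-model')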
evalscope/run.py CHANGED
@@ -131,8 +131,9 @@ def evaluate_model(task_config: TaskConfig, outputs: OutputsStructure) -> dict:
         )
         evaluators.append(evaluator)
 
-        # Update task_config.dataset_args with benchmark metadata
-        task_config.dataset_args[dataset_name] = benchmark.to_dict()
+        # Update task_config.dataset_args with benchmark metadata, except for DataCollection
+        if dataset_name != DataCollection.NAME:
+            task_config.dataset_args[dataset_name] = benchmark.to_dict()
 
     # dump task_cfg to outputs.configs_dir after creating evaluators
     task_config.dump_yaml(outputs.configs_dir)
@@ -149,17 +150,20 @@ def evaluate_model(task_config: TaskConfig, outputs: OutputsStructure) -> dict:
         logger.info(f'Overall report table: \n{report_table} \n')
     except Exception:
         logger.error('Failed to generate report table.')
-
     # Clean up
     if model is not None:
         import gc
-        import torch
 
         del model
         del evaluators
-        torch.cuda.empty_cache()
         gc.collect()
 
+        from evalscope.utils.import_utils import check_import
+        if check_import('torch'):
+            import torch
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+
     return eval_results
 
 
evalscope/third_party/toolbench_static/llm/swift_infer.py CHANGED
@@ -1,9 +1,5 @@
-import os
 from dataclasses import dataclass
-from swift.llm import InferEngine, InferRequest, PtEngine, RequestConfig, get_template
 
-# Set the GPU environment variable
-os.environ['CUDA_VISIBLE_DEVICES'] = '0'
 
 @dataclass
 class SwiftInferArgs:
evalscope/utils/chat_service.py CHANGED
@@ -1,6 +1,5 @@
 import os
 import time
-import torch
 from contextlib import contextmanager
 from functools import partial
 from pydantic import BaseModel, Field
@@ -95,6 +94,7 @@ class TextCompletionResponse(BaseModel):
 class ChatService:
 
     def __init__(self, model_path, attn_implementation):
+        import torch
         from modelscope import AutoModelForCausalLM, AutoTokenizer
         from transformers import TextIteratorStreamer
 
evalscope/utils/import_utils.py CHANGED
@@ -5,13 +5,35 @@ import importlib
 import os
 from itertools import chain
 from types import ModuleType
-from typing import Any
+from typing import Any, Optional, Union
 
 from .logger import get_logger
 
 logger = get_logger()  # pylint: disable=invalid-name
 
 
+def check_import(module_name: str, package: Optional[str] = None, raise_error: bool = False) -> bool:
+    """Check if a module can be imported.
+
+    Args:
+        module_name (str): The name of the module to check.
+        package (str, optional): The package to install if the module is not found. Defaults to None.
+        raise_error (bool, optional): Whether to raise an error if the module is not found. Defaults to False.
+    """
+    try:
+        importlib.import_module(module_name)
+        return True
+    except ImportError:
+        error_msg = f'`{module_name}` not found.'
+        if package:
+            error_msg += f' Please run `pip install {package}` to use this feature.'
+        logger.warning(error_msg)
+
+        if raise_error:
+            raise ImportError(error_msg)
+        return False
+
+
 class _LazyModule(ModuleType):
     """
     Module class that surfaces all objects but only performs associated imports when the objects are requested.
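A short usage sketch for the new helper (nothing beyond the signature shown above is assumed):

    from evalscope.utils.import_utils import check_import

    # Soft dependency: returns False and logs a warning when torch is missing.
    if check_import('torch'):
        import torch

    # Hard dependency: raises ImportError with an install hint when cv2 is missing.
    check_import('cv2', package='opencv-python', raise_error=True)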
evalscope/utils/io_utils.py CHANGED
@@ -1,6 +1,7 @@
 import base64
 import csv
 import hashlib
+import io
 import json
 import jsonlines as jsonl
 import os
@@ -283,22 +284,62 @@ def get_valid_list(input_list, candidate_list):
         [i for i in input_list if i not in candidate_list]
 
 
-def PIL_to_base64(image: Image.Image, format: str = 'JPEG') -> str:
+def PIL_to_base64(image: Image.Image, format: str = 'JPEG', add_header: bool = False) -> str:
     """
     Convert a PIL Image to a base64 encoded string.
 
     Args:
         image (Image.Image): The PIL Image to convert.
         format (str): The format to save the image in. Default is 'JPEG'.
+        add_header (bool): Whether to add the base64 header. Default is False.
+
     Returns:
         str: Base64 encoded string of the image.
     """
     buffered = BytesIO()
     image.save(buffered, format=format)
     img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
+    if add_header:
+        img_str = f'data:image/{format.lower()};base64,{img_str}'
+    return img_str
+
+
+def bytes_to_base64(bytes_data: bytes, format: str = 'png', add_header: bool = False) -> str:
+    """Convert image bytes to a base64 encoded string.
+
+    Args:
+        bytes_data (bytes): The bytes to convert.
+        add_header (bool): Whether to add the base64 header. Default is False.
+
+    Returns:
+        str: Base64 encoded string of the bytes.
+    """
+    img_str = base64.b64encode(bytes_data).decode('utf-8')
+    if add_header:
+        img_str = f'data:image/{format};base64,{img_str}'
     return img_str
 
 
+def base64_to_PIL(base64_str):
+    """Convert a base64 encoded string to a PIL Image.
+
+    Args:
+        base64_str (str): The base64 encoded string.
+
+    Returns:
+        Image.Image: The decoded PIL Image.
+    """
+    # remove header
+    if ',' in base64_str:
+        base64_str = base64_str.split(',', 1)[1]
+
+    # decode
+    img_data = base64.b64decode(base64_str)
+    img_file = io.BytesIO(img_data)
+    img = Image.open(img_file)
+    return img
+
+
 def safe_filename(s: str, max_length: int = 255) -> str:
     """
     Convert a string into a safe filename by removing or replacing unsafe characters.
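A round-trip sketch of the new image helpers (illustrative; only the function signatures shown above are assumed):

    from PIL import Image
    from evalscope.utils.io_utils import PIL_to_base64, base64_to_PIL

    img = Image.new('RGB', (64, 64), color='red')
    # With add_header=True the result is a data URL: 'data:image/png;base64,...'
    data_url = PIL_to_base64(img, format='PNG', add_header=True)
    # base64_to_PIL strips the 'data:...;base64,' header (if present) before decoding.
    restored = base64_to_PIL(data_url)
    assert restored.size == (64, 64)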
evalscope/utils/model_utils.py CHANGED
@@ -3,6 +3,8 @@ import random
 from enum import Enum
 from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
 
+from evalscope.utils.import_utils import check_import
+
 if TYPE_CHECKING:
     from transformers import GenerationConfig
 
@@ -67,7 +69,8 @@ def seed_everything(seed: int):
     """
     random.seed(seed)
     np.random.seed(seed)
-    try:
+
+    if check_import('torch'):
         import torch
 
         torch.manual_seed(seed)
@@ -75,5 +78,3 @@ def seed_everything(seed: int):
         torch.cuda.manual_seed_all(seed)
         torch.backends.cudnn.deterministic = True
         torch.backends.cudnn.benchmark = False
-    except ImportError:
-        pass
evalscope/utils/multi_choices.py CHANGED
@@ -1,11 +1,8 @@
 # flake8: noqa: E501
-from __future__ import annotations
-
 import re
-from typing import TYPE_CHECKING, List, Optional
+from typing import List, Optional, Union
 
-if TYPE_CHECKING:
-    from evalscope.api.evaluator import Choices, Target, TaskState
+from evalscope.api.evaluator import Choices, Target, TaskState
 
 FEW_SHOT_TEMPLATE = r"""Here are some examples of how to answer similar questions:
 
@@ -84,7 +81,9 @@ def answer_options(choices: Choices) -> str:
     return '\n'.join([f'{answer_character(i)}) {choices[j].value}' for i, j in enumerate(indexes)])
 
 
-def prompt(question: str, choices: Choices, template: str, fewshot: Optional[str] = None) -> str:
+def prompt(question: str, choices: Union[Choices, List[str]], template: str, fewshot: Optional[str] = None) -> str:
+    if isinstance(choices, list):
+        choices = Choices(choices)
 
     choices_text = answer_options(choices)
     letters = ','.join(answer_character(i) for i in range(len(choices)))
@@ -122,6 +121,14 @@ def format_example(
     return f'{question}\n{choices_text}\nANSWER: {answer.text}'
 
 
+def _fallback_parse_answer(completion: str) -> Optional[set[str]]:
+    # Fallback to find the last upper case letter
+    for letter in reversed(completion):
+        if letter.isupper():
+            return {letter}
+    return None
+
+
 def parse_answers(state: TaskState, multiple_correct: bool = False) -> set[str]:
     """
     Convenience function for extracting answers from the state output.
@@ -150,6 +157,11 @@ def parse_answers(state: TaskState, multiple_correct: bool = False) -> set[str]:
         state.output.completion,
     )
 
+    if match is None:
+        fallback_answer = _fallback_parse_answer(state.output.completion)
+        if fallback_answer:
+            return fallback_answer
+
     if match is None:
         return set()
 
@@ -200,6 +212,11 @@ def parse_answers_zh(state: TaskState, multiple_correct: bool = False) -> set[str]:
     pattern = r'答案\s*[::]\s*([A-Za-z0-9,,]+)'
     match = re.search(pattern, state.output.completion, flags=re.MULTILINE)
 
+    if match is None:
+        fallback_answer = _fallback_parse_answer(state.output.completion)
+        if fallback_answer:
+            return fallback_answer
+
     if match is None:
         return set()
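To make the new fallback concrete, here is a standalone sketch that mirrors _fallback_parse_answer (re-implemented locally, since it is a private helper and its import path is not guaranteed):

    from typing import Optional, Set

    def fallback_parse_answer(completion: str) -> Optional[Set[str]]:
        # Scan the completion from the end and return the last upper-case letter found.
        for letter in reversed(completion):
            if letter.isupper():
                return {letter}
        return None

    print(fallback_parse_answer('I would go with option (C).'))  # {'C'}
    print(fallback_parse_answer('no idea'))                      # None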
 
evalscope/version.py CHANGED
@@ -1,4 +1,4 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
-__version__ = '1.0.0'
-__release_datetime__ = '2025-08-25 12:00:00'
+__version__ = '1.0.1'
+__release_datetime__ = '2025-09-05 14:00:00'
{evalscope-1.0.0.dist-info → evalscope-1.0.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: evalscope
-Version: 1.0.0
+Version: 1.0.1
 Summary: EvalScope: Lightweight LLMs Evaluation Framework
 Home-page: https://github.com/modelscope/evalscope
 Author: ModelScope team
@@ -17,12 +17,10 @@ Classifier: Programming Language :: Python :: 3.12
 Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: accelerate
 Requires-Dist: colorlog
 Requires-Dist: datasets==3.6.0
 Requires-Dist: docstring-parser
 Requires-Dist: dotenv
-Requires-Dist: immutabledict
 Requires-Dist: jieba
 Requires-Dist: jsonlines
 Requires-Dist: langdetect
@@ -34,7 +32,6 @@ Requires-Dist: openai
 Requires-Dist: overrides
 Requires-Dist: pandas
 Requires-Dist: pillow
-Requires-Dist: pyarrow
 Requires-Dist: pydantic
 Requires-Dist: pyyaml>=5.1
 Requires-Dist: requests
@@ -46,7 +43,6 @@ Requires-Dist: scikit-learn
 Requires-Dist: seaborn
 Requires-Dist: sympy
 Requires-Dist: tabulate
-Requires-Dist: torch
 Requires-Dist: tqdm
 Requires-Dist: transformers>=4.33
 Requires-Dist: word2number
@@ -57,14 +53,13 @@ Requires-Dist: omegaconf; extra == "aigc"
 Requires-Dist: open-clip-torch; extra == "aigc"
 Requires-Dist: opencv-python; extra == "aigc"
 Requires-Dist: peft>=0.17; extra == "aigc"
+Requires-Dist: torch; extra == "aigc"
 Requires-Dist: torchvision; extra == "aigc"
 Provides-Extra: all
-Requires-Dist: accelerate; extra == "all"
 Requires-Dist: colorlog; extra == "all"
 Requires-Dist: datasets==3.6.0; extra == "all"
 Requires-Dist: docstring-parser; extra == "all"
 Requires-Dist: dotenv; extra == "all"
-Requires-Dist: immutabledict; extra == "all"
 Requires-Dist: jieba; extra == "all"
 Requires-Dist: jsonlines; extra == "all"
 Requires-Dist: langdetect; extra == "all"
@@ -76,7 +71,6 @@ Requires-Dist: openai; extra == "all"
 Requires-Dist: overrides; extra == "all"
 Requires-Dist: pandas; extra == "all"
 Requires-Dist: pillow; extra == "all"
-Requires-Dist: pyarrow; extra == "all"
 Requires-Dist: pydantic; extra == "all"
 Requires-Dist: pyyaml>=5.1; extra == "all"
 Requires-Dist: requests; extra == "all"
@@ -88,7 +82,6 @@ Requires-Dist: scikit-learn; extra == "all"
 Requires-Dist: seaborn; extra == "all"
 Requires-Dist: sympy; extra == "all"
 Requires-Dist: tabulate; extra == "all"
-Requires-Dist: torch; extra == "all"
 Requires-Dist: tqdm; extra == "all"
 Requires-Dist: transformers>=4.33; extra == "all"
 Requires-Dist: word2number; extra == "all"
@@ -100,6 +93,7 @@ Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "all"
 Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "all"
 Requires-Dist: mteb==1.38.20; extra == "all"
 Requires-Dist: ragas==0.2.14; extra == "all"
+Requires-Dist: torch; extra == "all"
 Requires-Dist: webdataset>0.2.0; extra == "all"
 Requires-Dist: aiohttp; extra == "all"
 Requires-Dist: fastapi; extra == "all"
@@ -154,6 +148,7 @@ Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "rag"
 Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "rag"
 Requires-Dist: mteb==1.38.20; extra == "rag"
 Requires-Dist: ragas==0.2.14; extra == "rag"
+Requires-Dist: torch; extra == "rag"
 Requires-Dist: webdataset>0.2.0; extra == "rag"
 Provides-Extra: vlmeval
 Requires-Dist: ms-vlmeval>=0.0.17; extra == "vlmeval"
@@ -277,7 +272,9 @@ Please scan the QR code below to join our community groups:
 >
 > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
 
-- 🔥 **[2025.08.22]** Version 1.0 Refactoring.
+- 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
+- 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
+- 🔥 **[2025.08.22]** Version 1.0 Refactoring. Breaking changes; please [refer to the documentation](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
 - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
 - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
 - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
@@ -285,7 +282,7 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
 - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
 - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
-- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
+- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
 - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
 - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
 <details><summary>More</summary>
@@ -294,7 +291,7 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
 - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
 - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
-- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
 - 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
 - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
 - 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
@@ -493,7 +490,7 @@ run_task(task_cfg="config.json")
 
 ### Basic Parameter
 - `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
-- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
 - `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
 
 ### Output Results
@@ -582,7 +579,7 @@ For more customized evaluations, such as customizing model parameters or dataset
 evalscope eval \
   --model Qwen/Qwen3-0.6B \
   --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
-  --generation-config '{"do_sample":true,"temperature":0.6,"max_new_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
+  --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
   --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
   --datasets gsm8k \
   --limit 10
@@ -596,7 +593,7 @@
 - `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
   - `do_sample`: Whether to use sampling
   - `temperature`: Generation temperature
-  - `max_new_tokens`: Maximum length of generated tokens
+  - `max_tokens`: Maximum length of generated tokens
   - `chat_template_kwargs`: Model inference template parameters
 - `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
   - `few_shot_num`: Number of few-shot examples