evalscope 1.0.1-py3-none-any.whl → 1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of evalscope might be problematic.

Files changed (87)
  1. evalscope/api/benchmark/adapters/default_data_adapter.py +6 -4
  2. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  3. evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
  4. evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
  5. evalscope/api/benchmark/benchmark.py +27 -2
  6. evalscope/api/benchmark/meta.py +3 -0
  7. evalscope/api/evaluator/evaluator.py +5 -0
  8. evalscope/api/evaluator/state.py +5 -0
  9. evalscope/api/messages/chat_message.py +6 -1
  10. evalscope/api/mixin/__init__.py +1 -0
  11. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  12. evalscope/api/mixin/sandbox_mixin.py +204 -0
  13. evalscope/api/model/generate_config.py +0 -3
  14. evalscope/api/model/model.py +1 -1
  15. evalscope/api/tool/tool_info.py +1 -1
  16. evalscope/arguments.py +6 -0
  17. evalscope/benchmarks/ai2d/__init__.py +0 -0
  18. evalscope/benchmarks/ai2d/ai2d_adapter.py +53 -0
  19. evalscope/benchmarks/amc/__init__.py +0 -0
  20. evalscope/benchmarks/amc/amc_adapter.py +46 -0
  21. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  22. evalscope/benchmarks/bfcl/bfcl_adapter.py +141 -2
  23. evalscope/benchmarks/bfcl/generation.py +7 -7
  24. evalscope/benchmarks/drop/drop_adapter.py +1 -1
  25. evalscope/benchmarks/healthbench/__init__.py +0 -0
  26. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  27. evalscope/benchmarks/healthbench/utils.py +102 -0
  28. evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
  29. evalscope/benchmarks/humaneval/utils.py +235 -0
  30. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  31. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
  32. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  33. evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
  34. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  35. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
  36. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  37. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  38. evalscope/benchmarks/mm_star/__init__.py +0 -0
  39. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  40. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
  41. evalscope/benchmarks/multi_if/__init__.py +0 -0
  42. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  43. evalscope/benchmarks/multi_if/metrics.py +120 -0
  44. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  45. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
  46. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  47. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  48. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  49. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  50. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  51. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  52. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  53. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
  54. evalscope/config.py +24 -1
  55. evalscope/constants.py +3 -0
  56. evalscope/evaluator/evaluator.py +25 -7
  57. evalscope/metrics/metric.py +27 -2
  58. evalscope/models/model_apis.py +10 -8
  59. evalscope/models/utils/openai.py +1 -2
  60. evalscope/perf/arguments.py +2 -0
  61. evalscope/perf/plugin/api/base.py +2 -2
  62. evalscope/perf/plugin/api/default_api.py +7 -7
  63. evalscope/perf/plugin/api/openai_api.py +83 -19
  64. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  65. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  66. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  67. evalscope/perf/utils/benchmark_util.py +1 -2
  68. evalscope/report/combinator.py +0 -25
  69. evalscope/report/report.py +8 -4
  70. evalscope/run.py +1 -1
  71. evalscope/utils/function_utils.py +41 -0
  72. evalscope/utils/import_utils.py +63 -13
  73. evalscope/utils/io_utils.py +19 -11
  74. evalscope/utils/json_schema.py +23 -2
  75. evalscope/utils/logger.py +19 -0
  76. evalscope/utils/model_utils.py +1 -1
  77. evalscope/version.py +2 -2
  78. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/METADATA +6 -10
  79. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/RECORD +87 -59
  80. tests/benchmark/test_eval.py +51 -7
  81. tests/benchmark/test_sandbox.py +81 -0
  82. tests/benchmark/test_vlm.py +60 -3
  83. tests/perf/test_perf.py +40 -12
  84. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/LICENSE +0 -0
  85. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/WHEEL +0 -0
  86. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/entry_points.txt +0 -0
  87. {evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/top_level.txt +0 -0
evalscope/report/combinator.py CHANGED
@@ -86,28 +86,3 @@ def gen_table(
          add_overall_metric=add_overall_metric
      )
      return tabulate(table, headers=table.columns, tablefmt='grid', showindex=False)
-
-
- class ReportsRecorder:
-     COMMON_DATASET_PATH = []
-     CUSTOM_DATASET_PATH = []
-
-     def __init__(self, oss_url: str = '', endpoint: str = ''):
-         pass
-
-
- if __name__ == '__main__':
-     report_dir_1 = './outputs/20250117_151926'
-     # report_dir_2 = './outputs/20250107_204445/reports'
-
-     report_table = gen_table(reports_path_list=[report_dir_1])
-     print(report_table)
-
-     # ALL VALUES ONLY FOR EXAMPLE
-     # +--------------------------+-------------------+-------------+
-     # | Model                    | CompetitionMath   | GSM8K       |
-     # +==========================+===================+=============+
-     # | ZhipuAI_chatglm2-6b-base | 25.0 (acc)        | 30.50 (acc) |
-     # +--------------------------+-------------------+-------------+
-     # | ZhipuAI_chatglm2-6b      | 30.5 (acc)        | 40.50 (acc) |
-     # +--------------------------+-------------------+-------------+
evalscope/report/report.py CHANGED
@@ -22,7 +22,7 @@ ANALYSIS_PROMPT = """根据给出的json格式的模型评测结果,输出分
  """
 
 
- def normalize_score(score: Union[float, dict], keep_num: int = 4) -> Union[float, dict]:
+ def normalize_score(score: Union[float, dict, int], keep_num: int = 4) -> Union[float, dict]:
      """
      Normalize score.
 
@@ -37,9 +37,10 @@ def normalize_score(score: Union[float, dict], keep_num: int = 4) -> Union[float
          score = round(score, keep_num)
      elif isinstance(score, dict):
          score = {k: round(v, keep_num) for k, v in score.items()}
+     elif isinstance(score, int):
+         score = float(score)
      else:
          logger.warning(f'Unknown score type: {type(score)}')
-
      return score
 
 
@@ -103,6 +104,7 @@ class ReportKey:
      subset_name = 'Subset'
      num = 'Num'
      score = 'Score'
+     overall_score = 'OVERALL'
 
 
  @dataclass
@@ -181,12 +183,14 @@ class Report:
                  table[ReportKey.num].append(subset.num)
                  table[ReportKey.score].append(subset.score)
              # add overall metric when there are multiple subsets
-             if metric_count > 1 and add_overall_metric:
+             if metric_count > 1 and add_overall_metric and (
+                 ReportKey.overall_score not in table[ReportKey.subset_name]
+             ):
                  table[ReportKey.model_name].append(self.model_name)
                  table[ReportKey.dataset_name].append(self.dataset_name)
                  table[ReportKey.metric_name].append(metric.name)
                  table[ReportKey.category_name].append(('-', ))
-                 table[ReportKey.subset_name].append('OVERALL')
+                 table[ReportKey.subset_name].append(ReportKey.overall_score)
                  table[ReportKey.num].append(metric.num)
                  table[ReportKey.score].append(metric.score)
      # NOTE: only flatten metrics if needed, use the first metric by default
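For orientation, a minimal sketch of the updated normalize_score behaviour; the sample values are illustrative and assume the function is importable from evalscope.report.report as shown above:

    from evalscope.report.report import normalize_score

    # Floats and dict values are rounded to `keep_num` decimal places.
    print(normalize_score(0.123456))            # 0.1235
    print(normalize_score({'acc': 0.333333}))   # {'acc': 0.3333}

    # New in 1.0.2: plain ints are accepted and cast to float.
    print(normalize_score(1))                   # 1.0
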
evalscope/run.py CHANGED
@@ -159,7 +159,7 @@ def evaluate_model(task_config: TaskConfig, outputs: OutputsStructure) -> dict:
      gc.collect()
 
      from evalscope.utils.import_utils import check_import
-     if check_import('torch'):
+     if check_import('torch', raise_warning=False):
          import torch
          if torch.cuda.is_available():
              torch.cuda.empty_cache()
evalscope/utils/function_utils.py CHANGED
@@ -1,4 +1,6 @@
  import threading
+ import time
+ from contextlib import contextmanager
  from functools import wraps
 
 
@@ -27,3 +29,42 @@ def thread_safe(func):
          return func(*args, **kwargs)
 
      return wrapper
+
+
+ def retry_func(retries=3, sleep_interval=0):
+     """A decorator that retries a function call up to `retries` times if an exception occurs."""
+
+     def decorator(func):
+
+         @wraps(func)
+         def wrapper(*args, **kwargs):
+             last_exception = None
+             for attempt in range(retries):
+                 try:
+                     return func(*args, **kwargs)
+                 except Exception as e:
+                     last_exception = e
+                     if sleep_interval > 0:
+                         time.sleep(sleep_interval)
+             raise last_exception
+
+         return wrapper
+
+     return decorator
+
+
+ @contextmanager
+ def retry_context(retries=3, sleep_interval=0):
+     """A context manager that retries the code block up to `retries` times if an exception occurs."""
+     last_exception = None
+     for attempt in range(retries):
+         try:
+             yield
+             return  # If no exception, exit successfully
+         except Exception as e:
+             last_exception = e
+             if sleep_interval > 0:
+                 time.sleep(sleep_interval)
+             if attempt == retries - 1:  # Last attempt
+                 break
+     raise last_exception
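For orientation, a minimal usage sketch of the new retry helpers; the flaky_call function and its failure rate below are hypothetical and only exist to exercise the decorator and context manager added above:

    import random

    from evalscope.utils.function_utils import retry_context, retry_func

    @retry_func(retries=3, sleep_interval=1)
    def flaky_call():
        # Hypothetical function that fails at random.
        if random.random() < 0.5:
            raise RuntimeError('transient failure')
        return 'ok'

    print(flaky_call())  # re-invoked up to 3 times, sleeping 1s between attempts

    with retry_context(retries=3):
        print(flaky_call())
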
evalscope/utils/import_utils.py CHANGED
@@ -7,32 +7,82 @@ from itertools import chain
  from types import ModuleType
  from typing import Any, Optional, Union
 
+ from evalscope.constants import IS_BUILD_DOC
  from .logger import get_logger
 
  logger = get_logger()  # pylint: disable=invalid-name
 
 
- def check_import(module_name: str, package: Optional[str] = None, raise_error: bool = False) -> bool:
-     """Check if a module can be imported.
+ def check_import(
+     module_name: Union[str, list[str]],
+     package: Optional[Union[str, list[str]]] = None,
+     raise_warning: bool = True,
+     raise_error: bool = False,
+     feature_name: Optional[str] = 'this feature',
+ ) -> bool:
+     """Check if a module or list of modules can be imported.
 
      Args:
-         module_name (str): The name of the module to check.
-         package (str, optional): The package to install if the module is not found. Defaults to None.
-         raise_error (bool, optional): Whether to raise an error if the module is not found. Defaults to False.
+         module_name (Union[str, list[str]]): The name(s) of the module(s) to check.
+         package (Union[str, list[str]], optional): The package(s) to install if the module(s) are not found.
+             Defaults to None.
+         raise_error (bool, optional): Whether to raise an error if any module is not found. Defaults to False.
+         raise_warning (bool, optional): Whether to log a warning if any module is not found. Defaults to True.
+         feature_name (str, optional): The feature name that requires the module(s). Used in the warning/error message.
+             Defaults to 'this feature'.
+
+     Returns:
+         bool: True if all modules can be imported, False otherwise.
      """
-     try:
-         importlib.import_module(module_name)
-         return True
-     except ImportError:
-         error_msg = f'`{module_name}` not found.'
-         if package:
-             error_msg += f' Please run `pip install {package}` to use this feature.'
+     # Convert single strings to lists for uniform processing
+     if isinstance(module_name, str):
+         module_names = [module_name]
+     else:
+         module_names = module_name
+
+     if package is None:
+         packages = [None] * len(module_names)
+     elif isinstance(package, str):
+         packages = [package] * len(module_names)
+     else:
+         packages = package
+         # Ensure packages list has same length as module_names
+         if len(packages) < len(module_names):
+             packages.extend([None] * (len(module_names) - len(packages)))
+
+     missing_modules = []
+     missing_packages = []
+
+     for i, mod_name in enumerate(module_names):
+         try:
+             importlib.import_module(mod_name)
+         except ImportError:
+             missing_modules.append(mod_name)
+             if i < len(packages) and packages[i]:
+                 missing_packages.append(packages[i])
+
+     if missing_modules:
+         if len(missing_modules) == 1:
+             error_msg = f'`{missing_modules[0]}` not found.'
+         else:
+             error_msg = f'The following modules are not found: {", ".join(f"`{mod}`" for mod in missing_modules)}.'
+
+         if missing_packages:
+             if len(missing_packages) == 1:
+                 error_msg += f' Please run `pip install {missing_packages[0]}` to use {feature_name}.'
+             else:
+                 unique_packages = list(dict.fromkeys(missing_packages))  # Remove duplicates while preserving order
+                 error_msg += f' Please run `pip install {" ".join(unique_packages)}` to use {feature_name}.'
+
+         if raise_warning:
              logger.warning(error_msg)
 
-         if raise_error:
+         if not IS_BUILD_DOC and raise_error:
              raise ImportError(error_msg)
          return False
 
+     return True
+
 
  class _LazyModule(ModuleType):
      """
evalscope/utils/io_utils.py CHANGED
@@ -9,6 +9,7 @@ import re
  import string
  import unicodedata
  import yaml
+ from datetime import datetime
  from io import BytesIO
  from PIL import Image
 
@@ -123,6 +124,9 @@ def dump_jsonl_data(data_list, jsonl_file, dump_mode=DumpMode.OVERWRITE):
      if not isinstance(data_list, list):
          data_list = [data_list]
 
+     # Convert non-serializable types to serializable ones
+     data_list = convert_normal_types(data_list)
+
      if dump_mode == DumpMode.OVERWRITE:
          dump_mode = 'w'
      elif dump_mode == DumpMode.APPEND:
@@ -304,20 +308,22 @@ def PIL_to_base64(image: Image.Image, format: str = 'JPEG', add_header: bool = F
      return img_str
 
 
- def bytes_to_base64(bytes_data: bytes, format: str = 'png', add_header: bool = False) -> str:
-     """Convert image bytes to a base64 encoded string.
+ def bytes_to_base64(bytes_data: bytes, *, format: str = 'png', add_header: bool = False, content_type='image') -> str:
+     """Convert bytes to a base64 encoded string.
 
      Args:
          bytes_data (bytes): The bytes to convert.
+         format (str): The format of the image. Default is 'png'.
          add_header (bool): Whether to add the base64 header. Default is False.
+         content_type (str): The type of the data, 'image' or 'audio'. Default is 'image'.
 
      Returns:
          str: Base64 encoded string of the bytes.
      """
-     img_str = base64.b64encode(bytes_data).decode('utf-8')
+     base64_str = base64.b64encode(bytes_data).decode('utf-8')
      if add_header:
-         img_str = f'data:image/{format};base64,{img_str}'
-     return img_str
+         base64_str = f'data:{content_type}/{format};base64,{base64_str}'
+     return base64_str
 
 
  def base64_to_PIL(base64_str):
@@ -392,11 +398,13 @@ def safe_filename(s: str, max_length: int = 255) -> str:
      return s
 
 
- def convert_numpy_types(obj):
-     """Recursively convert numpy types to native Python types for JSON serialization."""
+ def convert_normal_types(obj):
+     """Recursively convert numpy types and datetime objects to native Python types for JSON serialization."""
      import numpy as np
 
-     if isinstance(obj, np.bool_):
+     if isinstance(obj, datetime):
+         return obj.isoformat()
+     elif isinstance(obj, np.bool_):
          return bool(obj)
      elif isinstance(obj, np.integer):
          return int(obj)
@@ -405,10 +413,10 @@ def convert_numpy_types(obj):
      elif isinstance(obj, np.ndarray):
          return obj.tolist()
      elif isinstance(obj, dict):
-         return {key: convert_numpy_types(value) for key, value in obj.items()}
+         return {key: convert_normal_types(value) for key, value in obj.items()}
      elif isinstance(obj, list):
-         return [convert_numpy_types(item) for item in obj]
+         return [convert_normal_types(item) for item in obj]
      elif isinstance(obj, tuple):
-         return tuple(convert_numpy_types(item) for item in obj)
+         return tuple(convert_normal_types(item) for item in obj)
      else:
          return obj
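A brief sketch of the updated io_utils helpers, following the signatures in the diff; the sample payloads are illustrative:

    from datetime import datetime

    from evalscope.utils.io_utils import bytes_to_base64, convert_normal_types

    # format/add_header/content_type are now keyword-only; audio payloads get a matching data URL header.
    header = bytes_to_base64(b'\x00\x01', format='wav', add_header=True, content_type='audio')
    # -> 'data:audio/wav;base64,AAE='

    # datetime values are converted to ISO strings, so dump_jsonl_data can serialize them as JSON.
    record = convert_normal_types({'created_at': datetime(2025, 9, 23, 18, 0)})
    # -> {'created_at': '2025-09-23T18:00:00'}
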
evalscope/utils/json_schema.py CHANGED
@@ -4,7 +4,7 @@ from copy import deepcopy
  from dataclasses import is_dataclass
  from datetime import date, datetime, time
  from enum import EnumMeta
- from pydantic import BaseModel, Field
+ from pydantic import BaseModel, Field, field_validator, model_validator
  from typing import (
      Any,
      Dict,
@@ -59,6 +59,26 @@ class JSONSchema(BaseModel):
      required: Optional[List[str]] = Field(default=None)
      """Required fields for object parameters."""
 
+     @field_validator('type')
+     def validate_type(cls, v: Optional[str]) -> Optional[JSONType]:
+         return python_type_to_json_type(v)
+
+     @model_validator(mode='before')
+     def convert_type_before_validation(cls, values):
+         values = deepcopy(values)
+
+         def recursive_convert_type(obj):
+             if isinstance(obj, dict):
+                 if 'type' in obj:
+                     obj['type'] = python_type_to_json_type(obj['type'])
+                 for k, v in obj.items():
+                     obj[k] = recursive_convert_type(v)
+             elif isinstance(obj, list):
+                 return [recursive_convert_type(item) for item in obj]
+             return obj
+
+         return recursive_convert_type(values)
+
 
  def json_schema(t: Type[Any]) -> JSONSchema:
      """Provide a JSON Schema for the specified type.
@@ -152,6 +172,8 @@ def cls_json_schema(cls: Type[Any]) -> JSONSchema:
 
 
  def python_type_to_json_type(python_type: Optional[str]) -> JSONType:
+     if python_type is not None and python_type in get_args(JSONType):
+         return python_type
      if python_type == 'str':
          return 'string'
      elif python_type == 'int':
@@ -205,4 +227,3 @@ def resolve_schema_references(schema: Dict[str, Any]) -> Dict[str, Any]:
          return obj
 
      return cast(Dict[str, Any], _resolve_refs(schema))
-     return cast(Dict[str, Any], _resolve_refs(schema))
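A small sketch of what the new validators do, assuming JSONSchema and python_type_to_json_type are imported from evalscope.utils.json_schema; the example schema is illustrative:

    from evalscope.utils.json_schema import JSONSchema, python_type_to_json_type

    # Values that are already valid JSON Schema types now pass through unchanged,
    # while Python type names are still translated.
    assert python_type_to_json_type('string') == 'string'
    assert python_type_to_json_type('str') == 'string'

    # The model/field validators normalize 'type' at construction time,
    # so a Python-style type name is accepted here as well.
    schema = JSONSchema(type='str')
    assert schema.type == 'string'
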
evalscope/utils/logger.py CHANGED
@@ -28,6 +28,25 @@ logging.getLogger('datasets').setLevel(logging.WARNING)
  logging.getLogger('httpx').setLevel(logging.WARNING)
  logging.getLogger('modelscope').setLevel(logging.ERROR)
 
+ info_set = set()
+ warning_set = set()
+
+
+ def info_once(self, msg, *args, **kwargs):
+     hash_id = kwargs.get('hash_id') or msg
+     if hash_id in info_set:
+         return
+     info_set.add(hash_id)
+     self.info(msg)
+
+
+ def warning_once(self, msg, *args, **kwargs):
+     hash_id = kwargs.get('hash_id') or msg
+     if hash_id in warning_set:
+         return
+     warning_set.add(hash_id)
+     self.warning(msg)
+
 
  def get_logger(
      log_file: Optional[str] = None,
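The diff does not show how info_once/warning_once are wired onto loggers; as a hedged illustration, they can be called directly with a logger as the first argument to deduplicate repeated messages:

    from evalscope.utils.logger import get_logger, info_once, warning_once

    logger = get_logger()

    # Repeats are suppressed, keyed by the message text (or an explicit hash_id kwarg).
    info_once(logger, 'dataset cache directory created')   # logged
    info_once(logger, 'dataset cache directory created')   # suppressed
    warning_once(logger, 'metric X is deprecated', hash_id='metric-x-deprecated')
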
evalscope/utils/model_utils.py CHANGED
@@ -70,7 +70,7 @@ def seed_everything(seed: int):
      random.seed(seed)
      np.random.seed(seed)
 
-     if check_import('torch'):
+     if check_import('torch', raise_warning=False):
          import torch
 
          torch.manual_seed(seed)
evalscope/version.py CHANGED
@@ -1,4 +1,4 @@
  # Copyright (c) Alibaba, Inc. and its affiliates.
 
- __version__ = '1.0.1'
- __release_datetime__ = '2025-09-05 14:00:00'
+ __version__ = '1.0.2'
+ __release_datetime__ = '2025-09-23 18:00:00'
{evalscope-1.0.1.dist-info → evalscope-1.0.2.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: evalscope
- Version: 1.0.1
+ Version: 1.0.2
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
  Home-page: https://github.com/modelscope/evalscope
  Author: ModelScope team
@@ -24,7 +24,7 @@ Requires-Dist: dotenv
  Requires-Dist: jieba
  Requires-Dist: jsonlines
  Requires-Dist: langdetect
- Requires-Dist: latex2sympy2-extended
+ Requires-Dist: latex2sympy2-extended[antlr4_9_3]
  Requires-Dist: matplotlib
  Requires-Dist: modelscope[framework]>=1.27
  Requires-Dist: nltk>=3.9
@@ -63,7 +63,7 @@ Requires-Dist: dotenv; extra == "all"
  Requires-Dist: jieba; extra == "all"
  Requires-Dist: jsonlines; extra == "all"
  Requires-Dist: langdetect; extra == "all"
- Requires-Dist: latex2sympy2-extended; extra == "all"
+ Requires-Dist: latex2sympy2-extended[antlr4_9_3]; extra == "all"
  Requires-Dist: matplotlib; extra == "all"
  Requires-Dist: modelscope[framework]>=1.27; extra == "all"
  Requires-Dist: nltk>=3.9; extra == "all"
@@ -97,6 +97,7 @@ Requires-Dist: torch; extra == "all"
  Requires-Dist: webdataset>0.2.0; extra == "all"
  Requires-Dist: aiohttp; extra == "all"
  Requires-Dist: fastapi; extra == "all"
+ Requires-Dist: jinja2; extra == "all"
  Requires-Dist: numpy; extra == "all"
  Requires-Dist: sse-starlette; extra == "all"
  Requires-Dist: transformers; extra == "all"
@@ -110,17 +111,10 @@ Requires-Dist: open-clip-torch; extra == "all"
  Requires-Dist: opencv-python; extra == "all"
  Requires-Dist: peft>=0.17; extra == "all"
  Requires-Dist: torchvision; extra == "all"
- Requires-Dist: bfcl-eval==2025.6.16; extra == "all"
- Requires-Dist: human-eval; extra == "all"
- Requires-Dist: pytest; extra == "all"
- Requires-Dist: pytest-cov; extra == "all"
- Requires-Dist: python-dotenv; extra == "all"
  Provides-Extra: app
  Requires-Dist: gradio==5.4.0; extra == "app"
  Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "app"
  Provides-Extra: dev
- Requires-Dist: bfcl-eval==2025.6.16; extra == "dev"
- Requires-Dist: human-eval; extra == "dev"
  Requires-Dist: pytest; extra == "dev"
  Requires-Dist: pytest-cov; extra == "dev"
  Requires-Dist: python-dotenv; extra == "dev"
@@ -136,6 +130,7 @@ Requires-Dist: ms-opencompass>=0.1.6; extra == "opencompass"
  Provides-Extra: perf
  Requires-Dist: aiohttp; extra == "perf"
  Requires-Dist: fastapi; extra == "perf"
+ Requires-Dist: jinja2; extra == "perf"
  Requires-Dist: numpy; extra == "perf"
  Requires-Dist: rich; extra == "perf"
  Requires-Dist: sse-starlette; extra == "perf"
@@ -272,6 +267,7 @@ Please scan the QR code below to join our community groups:
  >
  > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
 
+ - 🔥 **[2025.09.19]** Added support for multimodal image-text evaluation benchmarks including RealWorldQA, AI2D, MMStar, MMBench, and OmniBench, as well as pure text evaluation benchmarks such as Multi-IF, HealthBench, and AMC.
  - 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
  - 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
  - 🔥 **[2025.08.22]** Version 1.0 Refactoring. Break changes, please [refer to](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).