evalscope 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of evalscope might be problematic.
- evalscope/api/benchmark/adapters/default_data_adapter.py +18 -4
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
- evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
- evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
- evalscope/api/benchmark/benchmark.py +27 -2
- evalscope/api/benchmark/meta.py +3 -0
- evalscope/api/evaluator/evaluator.py +5 -0
- evalscope/api/evaluator/state.py +5 -0
- evalscope/api/messages/chat_message.py +6 -1
- evalscope/api/mixin/__init__.py +1 -0
- evalscope/api/mixin/llm_judge_mixin.py +2 -0
- evalscope/api/mixin/sandbox_mixin.py +204 -0
- evalscope/api/model/generate_config.py +0 -3
- evalscope/api/model/model.py +1 -1
- evalscope/api/tool/tool_info.py +1 -1
- evalscope/app/ui/multi_model.py +6 -1
- evalscope/app/ui/single_model.py +8 -2
- evalscope/app/utils/data_utils.py +3 -2
- evalscope/app/utils/visualization.py +2 -2
- evalscope/arguments.py +6 -0
- evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
- evalscope/benchmarks/amc/__init__.py +0 -0
- evalscope/benchmarks/amc/amc_adapter.py +46 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
- evalscope/benchmarks/bfcl/bfcl_adapter.py +106 -2
- evalscope/benchmarks/bfcl/generation.py +7 -7
- evalscope/benchmarks/blink/__init__.py +0 -0
- evalscope/benchmarks/blink/blink_adapter.py +61 -0
- evalscope/benchmarks/chartqa/__init__.py +0 -0
- evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
- evalscope/benchmarks/chartqa/utils.py +38 -0
- evalscope/benchmarks/docvqa/__init__.py +0 -0
- evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
- evalscope/benchmarks/drop/drop_adapter.py +1 -1
- evalscope/benchmarks/general_arena/utils.py +2 -1
- evalscope/benchmarks/healthbench/__init__.py +0 -0
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/hle/hle_adapter.py +3 -2
- evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/infovqa/__init__.py +0 -0
- evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +1 -1
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
- evalscope/benchmarks/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_adapter.py +101 -0
- evalscope/benchmarks/ocr_bench_v2/IoUscore_metric.py +87 -0
- evalscope/benchmarks/ocr_bench_v2/TEDS_metric.py +963 -0
- evalscope/benchmarks/ocr_bench_v2/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
- evalscope/benchmarks/ocr_bench_v2/page_ocr_metric.py +50 -0
- evalscope/benchmarks/ocr_bench_v2/parallel.py +46 -0
- evalscope/benchmarks/ocr_bench_v2/spotting_eval/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench_v2/spotting_eval/readme.txt +26 -0
- evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
- evalscope/benchmarks/ocr_bench_v2/spotting_eval/script.py +481 -0
- evalscope/benchmarks/ocr_bench_v2/spotting_metric.py +179 -0
- evalscope/benchmarks/ocr_bench_v2/utils.py +432 -0
- evalscope/benchmarks/ocr_bench_v2/vqa_metric.py +254 -0
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
- evalscope/config.py +24 -1
- evalscope/constants.py +3 -0
- evalscope/evaluator/evaluator.py +25 -7
- evalscope/metrics/metric.py +78 -2
- evalscope/metrics/metrics.py +16 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
- evalscope/models/model_apis.py +10 -8
- evalscope/models/utils/openai.py +1 -2
- evalscope/perf/arguments.py +2 -0
- evalscope/perf/plugin/api/base.py +2 -2
- evalscope/perf/plugin/api/default_api.py +7 -7
- evalscope/perf/plugin/api/openai_api.py +83 -19
- evalscope/perf/plugin/datasets/flickr8k.py +2 -2
- evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
- evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
- evalscope/perf/utils/benchmark_util.py +1 -2
- evalscope/report/__init__.py +9 -1
- evalscope/report/combinator.py +45 -20
- evalscope/report/report.py +8 -4
- evalscope/run.py +1 -1
- evalscope/utils/function_utils.py +41 -0
- evalscope/utils/import_utils.py +63 -13
- evalscope/utils/io_utils.py +19 -11
- evalscope/utils/json_schema.py +25 -2
- evalscope/utils/logger.py +19 -0
- evalscope/utils/model_utils.py +1 -1
- evalscope/utils/multi_choices.py +16 -1
- evalscope/version.py +2 -2
- {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/METADATA +10 -40
- {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/RECORD +120 -95
- {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/top_level.txt +0 -1
- tests/__init__.py +0 -1
- tests/benchmark/__init__.py +0 -1
- tests/benchmark/test_eval.py +0 -385
- tests/benchmark/test_image_edit.py +0 -65
- tests/benchmark/test_t2i.py +0 -142
- tests/benchmark/test_vlm.py +0 -80
- tests/cli/__init__.py +0 -1
- tests/cli/test_all.py +0 -269
- tests/cli/test_collection.py +0 -99
- tests/cli/test_custom.py +0 -268
- tests/cli/test_reasoning.py +0 -81
- tests/common.py +0 -73
- tests/perf/__init__.py +0 -1
- tests/perf/test_perf.py +0 -178
- tests/rag/test_clip_benchmark.py +0 -87
- tests/rag/test_mteb.py +0 -213
- tests/rag/test_ragas.py +0 -128
- tests/swift/__init__.py +0 -1
- tests/swift/test_run_swift_eval.py +0 -146
- tests/swift/test_run_swift_vlm_eval.py +0 -128
- tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
- tests/test_run_all.py +0 -12
- tests/utils.py +0 -13
- tests/vlm/__init__.py +0 -1
- tests/vlm/test_vlmeval.py +0 -102
- {tests/rag → evalscope/benchmarks/ai2d}/__init__.py +0 -0
- {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/LICENSE +0 -0
- {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/WHEEL +0 -0
- {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/entry_points.txt +0 -0
tests/vlm/test_vlmeval.py
DELETED
@@ -1,102 +0,0 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
-from dotenv import dotenv_values
-
-from tests.utils import test_level_list
-
-env = dotenv_values('.env')
-import unittest
-
-from evalscope.run import run_task
-from evalscope.summarizer import Summarizer
-from evalscope.utils.import_utils import is_module_installed
-from evalscope.utils.logger import get_logger
-
-logger = get_logger()
-
-
-class TestVLMEval(unittest.TestCase):
-
-    def setUp(self) -> None:
-        self._check_env('vlmeval')
-
-    def tearDown(self) -> None:
-        pass
-
-    @staticmethod
-    def _check_env(module_name: str):
-        if is_module_installed(module_name):
-            logger.info(f'{module_name} is installed.')
-        else:
-            raise ModuleNotFoundError(f'run: pip install {module_name}')
-
-    @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-    def test_run_vlm_eval_local(self):
-        task_cfg = {
-            'eval_backend': 'VLMEvalKit',
-            'eval_config': {
-                'data': ['SEEDBench_IMG', 'ChartQA_TEST'],
-                'limit': 20,
-                'mode': 'all',
-                'model': [{
-                    'name': 'qwen-vl-chat',
-                    'model_path': '../models/Qwen-VL-Chat'
-                }],  # model name for VLMEval config
-                'nproc': 1,
-                'reuse': True,
-            },
-            'work_dir': 'outputs',
-            'use_cache': 'outputs/20241216_142838'
-        }
-
-        logger.info(f'>> Start to run task: {task_cfg}')
-
-        run_task(task_cfg)
-
-        logger.info('>> Start to get the report with summarizer ...')
-        report_list = Summarizer.get_report_from_cfg(task_cfg)
-        logger.info(f'\n>>The report list: {report_list}')
-
-        assert len(report_list) > 0, f'Failed to get report list: {report_list}'
-
-
-    @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-    def test_run_vlm_api(self):
-        task_cfg = {
-            'eval_backend': 'VLMEvalKit',
-            'eval_config': {
-                'data': [
-                    # 'SEEDBench_IMG',
-                    # 'ChartQA_TEST',
-                    'MMDU'
-                ],
-                'limit': 5,
-                'mode': 'all',
-                'model': [
-                    {'api_base': 'https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions',
-                     'key': env.get('DASHSCOPE_API_KEY'),
-                     'name': 'CustomAPIModel',
-                     'temperature': 0.0,
-                     'type': 'qwen2.5-vl-7b-instruct',
-                     'img_size': -1,
-                     'video_llm': False,
-                     'max_tokens': 512,}
-                ],
-                'nproc': 5,
-                'reuse': False,
-            },
-            'work_dir': 'outputs',
-            # 'use_cache': 'outputs/20241216_142838'
-        }
-
-        logger.info(f'>> Start to run task: {task_cfg}')
-
-        run_task(task_cfg)
-
-        logger.info('>> Start to get the report with summarizer ...')
-        report_list = Summarizer.get_report_from_cfg(task_cfg)
-        logger.info(f'\n>>The report list: {report_list}')
-
-        assert len(report_list) > 0, f'Failed to get report list: {report_list}'
-
-if __name__ == '__main__':
-    unittest.main(buffer=False)
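Although this test was removed from the wheel, the local-evaluation path it covered can still be exercised as a standalone script. The sketch below is distilled from the deleted test above; it assumes the evalscope 1.0.x API that the test used (run_task, Summarizer.get_report_from_cfg) and a vlmeval installation, and the model path, dataset names, and output directory are the illustrative values from the test, not recommendations.

# Standalone sketch of the VLMEvalKit flow from the deleted test.
# Assumes `pip install vlmeval` and a local Qwen-VL-Chat checkout;
# all paths and dataset names below are illustrative values.
from evalscope.run import run_task
from evalscope.summarizer import Summarizer

task_cfg = {
    'eval_backend': 'VLMEvalKit',
    'eval_config': {
        'data': ['SEEDBench_IMG', 'ChartQA_TEST'],  # VLMEval dataset names
        'limit': 20,        # cap samples per dataset for a quick smoke run
        'mode': 'all',      # run both inference and evaluation
        'model': [{
            'name': 'qwen-vl-chat',                  # VLMEval model name
            'model_path': '../models/Qwen-VL-Chat',  # local checkpoint path
        }],
        'nproc': 1,
        'reuse': True,      # reuse cached predictions when present
    },
    'work_dir': 'outputs',
}

run_task(task_cfg)                                      # run the evaluation
report_list = Summarizer.get_report_from_cfg(task_cfg)  # collect reports
assert len(report_list) > 0, f'Failed to get report list: {report_list}'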
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|