evalscope 1.0.2__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of evalscope might be problematic.

Files changed (176)
  1. evalscope/api/benchmark/__init__.py +8 -1
  2. evalscope/api/benchmark/adapters/__init__.py +1 -0
  3. evalscope/api/benchmark/adapters/default_data_adapter.py +12 -0
  4. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  5. evalscope/api/benchmark/benchmark.py +14 -0
  6. evalscope/api/dataset/dataset.py +21 -0
  7. evalscope/api/dataset/loader.py +6 -2
  8. evalscope/api/mixin/sandbox_mixin.py +32 -54
  9. evalscope/api/model/generate_config.py +6 -0
  10. evalscope/app/ui/multi_model.py +6 -1
  11. evalscope/app/ui/single_model.py +8 -2
  12. evalscope/app/utils/data_utils.py +3 -2
  13. evalscope/app/utils/visualization.py +2 -2
  14. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  15. evalscope/benchmarks/ai2d/ai2d_adapter.py +3 -2
  16. evalscope/benchmarks/bfcl/bfcl_adapter.py +11 -46
  17. evalscope/benchmarks/blink/__init__.py +0 -0
  18. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  19. evalscope/benchmarks/chartqa/__init__.py +0 -0
  20. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  21. evalscope/benchmarks/chartqa/utils.py +38 -0
  22. evalscope/benchmarks/data_collection/data_collection_adapter.py +2 -1
  23. evalscope/benchmarks/docvqa/__init__.py +0 -0
  24. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  25. evalscope/benchmarks/general_arena/general_arena_adapter.py +1 -1
  26. evalscope/benchmarks/general_arena/utils.py +2 -1
  27. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
  28. evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
  29. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +23 -4
  30. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  31. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +158 -0
  32. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  33. evalscope/benchmarks/humaneval/humaneval_adapter.py +2 -1
  34. evalscope/benchmarks/infovqa/__init__.py +0 -0
  35. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  36. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +3 -1
  37. evalscope/benchmarks/math_verse/__init__.py +0 -0
  38. evalscope/benchmarks/math_verse/math_verse_adapter.py +100 -0
  39. evalscope/benchmarks/math_vision/__init__.py +0 -0
  40. evalscope/benchmarks/math_vision/math_vision_adapter.py +111 -0
  41. evalscope/benchmarks/math_vista/math_vista_adapter.py +6 -26
  42. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +2 -2
  43. evalscope/benchmarks/mmmu/mmmu_adapter.py +1 -1
  44. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -1
  45. evalscope/benchmarks/ner/__init__.py +0 -0
  46. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  47. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  48. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  49. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  50. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  51. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  52. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  53. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  54. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  55. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  56. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  57. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  58. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  59. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  60. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  61. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  62. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  63. evalscope/benchmarks/ocr_bench/ocr_bench_adapter.py +101 -0
  64. evalscope/benchmarks/ocr_bench_v2/IoUscore_metric.py +87 -0
  65. evalscope/benchmarks/ocr_bench_v2/TEDS_metric.py +963 -0
  66. evalscope/benchmarks/ocr_bench_v2/__init__.py +0 -0
  67. evalscope/benchmarks/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  68. evalscope/benchmarks/ocr_bench_v2/page_ocr_metric.py +50 -0
  69. evalscope/benchmarks/ocr_bench_v2/parallel.py +46 -0
  70. evalscope/benchmarks/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  71. evalscope/benchmarks/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  72. evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  73. evalscope/benchmarks/ocr_bench_v2/spotting_eval/script.py +481 -0
  74. evalscope/benchmarks/ocr_bench_v2/spotting_metric.py +179 -0
  75. evalscope/benchmarks/ocr_bench_v2/utils.py +433 -0
  76. evalscope/benchmarks/ocr_bench_v2/vqa_metric.py +254 -0
  77. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  78. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  79. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  80. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  81. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  82. evalscope/benchmarks/poly_math/__init__.py +0 -0
  83. evalscope/benchmarks/poly_math/poly_math_adapter.py +127 -0
  84. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  85. evalscope/benchmarks/pope/__init__.py +0 -0
  86. evalscope/benchmarks/pope/pope_adapter.py +111 -0
  87. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  88. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  89. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  90. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  91. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +1 -1
  92. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +1 -1
  93. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  94. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  95. evalscope/benchmarks/zerobench/__init__.py +0 -0
  96. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  97. evalscope/constants.py +4 -0
  98. evalscope/evaluator/evaluator.py +72 -79
  99. evalscope/metrics/math_parser.py +14 -0
  100. evalscope/metrics/metric.py +52 -1
  101. evalscope/metrics/metrics.py +16 -0
  102. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  103. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  104. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  105. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  106. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  107. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  108. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  109. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  110. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  111. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  112. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  113. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  114. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  115. evalscope/models/utils/openai.py +4 -0
  116. evalscope/perf/arguments.py +24 -4
  117. evalscope/perf/benchmark.py +74 -89
  118. evalscope/perf/http_client.py +31 -16
  119. evalscope/perf/main.py +15 -2
  120. evalscope/perf/plugin/api/base.py +9 -7
  121. evalscope/perf/plugin/api/custom_api.py +13 -58
  122. evalscope/perf/plugin/api/default_api.py +179 -79
  123. evalscope/perf/plugin/api/openai_api.py +4 -3
  124. evalscope/perf/plugin/datasets/base.py +21 -0
  125. evalscope/perf/plugin/datasets/custom.py +2 -3
  126. evalscope/perf/plugin/datasets/line_by_line.py +2 -3
  127. evalscope/perf/plugin/datasets/longalpaca.py +2 -3
  128. evalscope/perf/plugin/datasets/openqa.py +2 -4
  129. evalscope/perf/plugin/datasets/random_dataset.py +1 -3
  130. evalscope/perf/utils/benchmark_util.py +36 -22
  131. evalscope/perf/utils/db_util.py +14 -19
  132. evalscope/perf/utils/local_server.py +0 -44
  133. evalscope/perf/utils/log_utils.py +21 -6
  134. evalscope/report/__init__.py +11 -2
  135. evalscope/report/combinator.py +52 -2
  136. evalscope/run.py +4 -0
  137. evalscope/utils/function_utils.py +195 -12
  138. evalscope/utils/io_utils.py +74 -0
  139. evalscope/utils/json_schema.py +8 -6
  140. evalscope/utils/logger.py +49 -17
  141. evalscope/utils/multi_choices.py +16 -1
  142. evalscope/utils/ner.py +377 -0
  143. evalscope/version.py +2 -2
  144. {evalscope-1.0.2.dist-info → evalscope-1.1.1.dist-info}/METADATA +239 -393
  145. {evalscope-1.0.2.dist-info → evalscope-1.1.1.dist-info}/RECORD +140 -98
  146. {evalscope-1.0.2.dist-info → evalscope-1.1.1.dist-info}/WHEEL +1 -1
  147. {evalscope-1.0.2.dist-info → evalscope-1.1.1.dist-info}/top_level.txt +0 -1
  148. tests/__init__.py +0 -1
  149. tests/benchmark/__init__.py +0 -1
  150. tests/benchmark/test_eval.py +0 -429
  151. tests/benchmark/test_image_edit.py +0 -65
  152. tests/benchmark/test_sandbox.py +0 -81
  153. tests/benchmark/test_t2i.py +0 -142
  154. tests/benchmark/test_vlm.py +0 -137
  155. tests/cli/__init__.py +0 -1
  156. tests/cli/test_all.py +0 -269
  157. tests/cli/test_collection.py +0 -99
  158. tests/cli/test_custom.py +0 -268
  159. tests/cli/test_reasoning.py +0 -81
  160. tests/common.py +0 -73
  161. tests/perf/__init__.py +0 -1
  162. tests/perf/test_perf.py +0 -206
  163. tests/rag/test_clip_benchmark.py +0 -87
  164. tests/rag/test_mteb.py +0 -213
  165. tests/rag/test_ragas.py +0 -128
  166. tests/swift/__init__.py +0 -1
  167. tests/swift/test_run_swift_eval.py +0 -146
  168. tests/swift/test_run_swift_vlm_eval.py +0 -128
  169. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  170. tests/test_run_all.py +0 -12
  171. tests/utils.py +0 -13
  172. tests/vlm/__init__.py +0 -1
  173. tests/vlm/test_vlmeval.py +0 -102
  174. {tests/rag → evalscope/benchmarks/aa_lcr}/__init__.py +0 -0
  175. {evalscope-1.0.2.dist-info → evalscope-1.1.1.dist-info}/entry_points.txt +0 -0
  176. {evalscope-1.0.2.dist-info → evalscope-1.1.1.dist-info/licenses}/LICENSE +0 -0
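
The version bump itself lands in evalscope/version.py (+2 -2 above). As a quick sanity check after upgrading, a minimal sketch using only the Python standard library; it assumes a normal pip install, and the package name 'evalscope' is the one shown in this diff:

    # Minimal sketch: confirm which evalscope wheel is actually installed.
    # Standard library only; assumes the package was installed via pip.
    import importlib.metadata

    print(importlib.metadata.version('evalscope'))  # expect '1.1.1' after upgrading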
tests/swift/test_run_swift_vlm_eval.py DELETED
@@ -1,128 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- import json
- import os
- import requests
- import shutil
- import subprocess
- import time
- import unittest
-
- from evalscope.backend.vlm_eval_kit import VLMEvalKitBackendManager
- from evalscope.run import run_task
- from evalscope.summarizer import Summarizer
- from evalscope.utils.import_utils import is_module_installed
- from evalscope.utils.logger import get_logger
- from tests.utils import test_level_list
-
- logger = get_logger(__name__)
-
- DEFAULT_CHAT_MODEL_URL = 'http://127.0.0.1:8000/v1/chat/completions'
- DEFAULT_API_KEY = 'EMPTY'
- DEFAULT_MODEL_NAME = 'CustomAPIModel'
- DEFAULT_WORK_DIR = 'outputs/qwen-vl-chat'
-
-
- class TestRunSwiftVLMEval(unittest.TestCase):
-
-     def setUp(self) -> None:
-         logger.info('Init env for swift-eval UTs ...\n')
-         assert is_module_installed('evalscope'), 'Please install `llmuses` from pypi or source code.'
-
-         if not is_module_installed('vlmeval'):
-             logger.warning('Note: installing ms-vlmeval ...')
-             subprocess.run('pip3 install ms-vlmeval -U', shell=True, check=True)
-
-         if not is_module_installed('swift'):
-             logger.warning('Note: installing ms-swift ...')
-             subprocess.run('pip3 install ms-swift -U', shell=True, check=True)
-
-         if os.path.exists(DEFAULT_WORK_DIR):
-             shutil.rmtree(DEFAULT_WORK_DIR)
-             logger.info(f'Removed work dir: {os.path.abspath(DEFAULT_WORK_DIR)} \n')
-
-         logger.info('\nStaring run swift deploy ...')
-         self.model_name = 'qwen-vl-chat'
-         self.process_swift_deploy = subprocess.Popen(
-             f'swift deploy --model_type {self.model_name} --infer_backend pt', text=True, shell=True)
-
-         self.all_datasets = VLMEvalKitBackendManager.list_supported_datasets()
-         assert len(self.all_datasets) > 0, f'Failed to list datasets from VLMEvalKit backend: {self.all_datasets}'
-
-     def tearDown(self) -> None:
-         # Stop the swift deploy model service
-         logger.warning('Stopping swift deploy ...')
-         self.process_swift_deploy.terminate()
-         self.process_swift_deploy.wait()
-         logger.info('Process swift-deploy terminated successfully.')
-
-     @staticmethod
-     def _check_env(module_name: str):
-         if is_module_installed(module_name):
-             logger.info(f'{module_name} is installed.')
-         else:
-             raise ModuleNotFoundError(f'run: pip install {module_name}')
-
-     @staticmethod
-     def check_service_status(url: str, data: dict, retries: int = 20, delay: int = 10):
-         for i in range(retries):
-             try:
-                 logger.info(f'Attempt {i + 1}: Checking service at {url} ...')
-                 response = requests.post(
-                     url, data=json.dumps(data), headers={'Content-Type': 'application/json'}, timeout=30)
-                 if response.status_code == 200:
-                     logger.info(f'Service at {url} is available !\n\n')
-                     return True
-                 else:
-                     logger.info(f'Service at {url} returned status code {response.status_code}.')
-             except requests.exceptions.RequestException as e:
-                 logger.info(f'Attempt {i + 1}: An error occurred: {e}')
-
-             time.sleep(delay)
-
-         logger.info(f'Service at {url} is not available after {retries} retries.')
-         return False
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_api(self):
-         api_base = DEFAULT_CHAT_MODEL_URL
-         task_cfg = {
-             'eval_backend': 'VLMEvalKit',
-             'eval_config': {
-                 'data': ['SEEDBench_IMG', 'ChartQA_TEST'],
-                 'limit':
-                 30,
-                 'mode':
-                 'all',
-                 'model': [{
-                     'api_base': api_base,  # swfit deploy model api
-                     'key': DEFAULT_API_KEY,
-                     'name': DEFAULT_MODEL_NAME,  # must be CustomAPIModel for swift
-                     'temperature': 0.0,
-                     'type': self.model_name
-                 }],  # swift model type
-                 'nproc':
-                 1,
-                 'reuse':
-                 True,
-                 'work_dir':
-                 DEFAULT_WORK_DIR
-             }
-         }
-
-         # Check the service status
-         data = {'model': self.model_name, 'messages': [{'role': 'user', 'content': 'who are you?'}]}
-         assert self.check_service_status(api_base, data=data), f'Failed to check service status: {api_base}'
-
-         logger.info(f'>> Start to run task: {task_cfg}')
-
-         run_task(task_cfg)
-
-         logger.info('>> Start to get the report with summarizer ...')
-         report_list = Summarizer.get_report_from_cfg(task_cfg)
-         logger.info(f'\n>> The report list: {report_list}')
-
-         assert len(report_list) > 0, f'Failed to get report list: {report_list}'
-
-
- if __name__ == '__main__':
-     unittest.main(buffer=False)
tests/swift/test_run_swift_vlm_jugde_eval.py DELETED
@@ -1,157 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- import json
- import os
- import requests
- import shutil
- import subprocess
- import time
- import unittest
-
- from evalscope.backend.vlm_eval_kit import VLMEvalKitBackendManager
- from evalscope.run import run_task
- from evalscope.summarizer import Summarizer
- from evalscope.utils.import_utils import is_module_installed
- from evalscope.utils.logger import get_logger
- from tests.utils import test_level_list
-
- logger = get_logger(__name__)
-
- DEFAULT_CHAT_MODEL_URL = 'http://127.0.0.1:8000/v1/chat/completions'
- DEFAULT_JUDGE_MODEL_URL = 'http://127.0.0.1:8866/v1/chat/completions'
-
- DEFAULT_API_KEY = 'EMPTY'
- DEFAULT_MODEL_NAME = 'CustomAPIModel'
- DEFAULT_WORK_DIR = 'outputs'
-
-
- class TestRunSwiftVLMEval(unittest.TestCase):
-
-     def setUp(self) -> None:
-         logger.info('Init env for swift-eval UTs ...\n')
-         assert is_module_installed('llmuses'), 'Please install `llmuses` from pypi or source code.'
-
-         logger.warning('Note: installing ms-vlmeval ...')
-         subprocess.run('pip3 install ms-vlmeval -U', shell=True, check=True)
-
-         logger.warning('Note: installing ms-swift ...')
-         subprocess.run('pip3 install ms-swift -U', shell=True, check=True)
-
-         if os.path.exists(DEFAULT_WORK_DIR):
-             shutil.rmtree(DEFAULT_WORK_DIR)
-             logger.info(f'Removed work dir: {os.path.abspath(DEFAULT_WORK_DIR)} \n')
-
-         logger.info('\nStaring run swift deploy ...')
-         self.model_name = 'qwen-vl-chat'
-         self.process_swift_deploy = subprocess.Popen(
-             f'swift deploy --model_type {self.model_name}',
-             text=True,
-             shell=True,
-             stdout=subprocess.PIPE,
-             stderr=subprocess.PIPE)
-
-         logger.info('\nStaring run swift deploy judge ...')
-         self.judge_model_name = 'qwen2-7b-instruct'
-         self.process_swift_deploy_judge = subprocess.Popen(
-             f'swift deploy --model_type {self.judge_model_name} \
-             --port 8866',
-             text=True,
-             shell=True,
-             stdout=subprocess.PIPE,
-             stderr=subprocess.PIPE)
-
-         self.all_datasets = VLMEvalKitBackendManager.list_supported_datasets()
-         assert len(self.all_datasets) > 0, f'Failed to list datasets from VLMEvalKit backend: {self.all_datasets}'
-
-     def tearDown(self) -> None:
-         # Stop the swift deploy model service
-         logger.warning('\nStopping swift deploy ...')
-         self.process_swift_deploy.terminate()
-         self.process_swift_deploy.wait()
-
-         self.process_swift_deploy_judge.terminate()
-         self.process_swift_deploy_judge.wait()
-
-         logger.info('Process swift-deploy terminated successfully.')
-
-     @staticmethod
-     def _check_env(module_name: str):
-         if is_module_installed(module_name):
-             logger.info(f'{module_name} is installed.')
-         else:
-             raise ModuleNotFoundError(f'run: pip install {module_name}')
-
-     @staticmethod
-     def check_service_status(url: str, data: dict, retries: int = 20, delay: int = 10):
-         for i in range(retries):
-             try:
-                 logger.info(f'Attempt {i + 1}: Checking service at {url} ...')
-                 response = requests.post(
-                     url, data=json.dumps(data), headers={'Content-Type': 'application/json'}, timeout=30)
-                 if response.status_code == 200:
-                     logger.info(f'Service at {url} is available !\n\n')
-                     return True
-                 else:
-                     logger.info(f'Service at {url} returned status code {response.status_code}.')
-             except requests.exceptions.RequestException as e:
-                 logger.info(f'Attempt {i + 1}: An error occurred: {e}')
-
-             time.sleep(delay)
-
-         logger.info(f'Service at {url} is not available after {retries} retries.')
-         return False
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_api_with_judge(self):
-         # Check the judge service status
-         data = {'model': self.judge_model_name, 'messages': [{'role': 'user', 'content': 'who are you?'}]}
-         assert self.check_service_status(DEFAULT_JUDGE_MODEL_URL, data=data), \
-             f'Failed to check judge service status: {DEFAULT_JUDGE_MODEL_URL}'
-
-         api_base = DEFAULT_CHAT_MODEL_URL
-         task_cfg = {
-             'eval_backend': 'VLMEvalKit',
-             'eval_config': {
-                 'LOCAL_LLM':
-                 self.judge_model_name,  # judge model id
-                 'OPENAI_API_BASE':
-                 DEFAULT_JUDGE_MODEL_URL,  # judge model api
-                 'OPENAI_API_KEY':
-                 DEFAULT_API_KEY,
-                 'data': ['SEEDBench_IMG', 'ChartQA_TEST'],
-                 'limit':
-                 20,
-                 'mode':
-                 'all',
-                 'model': [{
-                     'api_base': api_base,  # swfit deploy model api
-                     'key': DEFAULT_API_KEY,
-                     'name': DEFAULT_MODEL_NAME,  # must be CustomAPIModel for swift
-                     'temperature': 0.0,
-                     'type': self.model_name
-                 }],  # swift model type
-                 'nproc':
-                 1,
-                 'reuse':
-                 True,
-                 'work_dir':
-                 DEFAULT_WORK_DIR
-             }
-         }
-
-         # Check the service status
-         data = {'model': self.model_name, 'messages': [{'role': 'user', 'content': 'who are you?'}]}
-         assert self.check_service_status(api_base, data=data), f'Failed to check service status: {api_base}'
-
-         logger.info(f'>> Start to run task: {task_cfg}')
-
-         run_task(task_cfg)
-
-         logger.info('>> Start to get the report with summarizer ...')
-         report_list = Summarizer.get_report_from_cfg(task_cfg)
-         logger.info(f'\n>> The report list: {report_list}')
-
-         assert len(report_list) > 0, f'Failed to get report list: {report_list}'
-
-
- if __name__ == '__main__':
-     unittest.main(buffer=False)
tests/test_run_all.py DELETED
@@ -1,12 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
-
- import subprocess
-
- if __name__ == '__main__':
-     cmd = f'TEST_LEVEL_LIST=0,1 python3 -m unittest discover tests'
-     run_res = subprocess.run(cmd, text=True, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
-     if run_res.returncode == 0:
-         print(f'>>test_run_all stdout: {run_res.stdout}')
-     else:
-         print(f'>>test_run_all stderr: {run_res.stderr}')
tests/utils.py DELETED
@@ -1,13 +0,0 @@
- import os
-
- TEST_LEVEL_LIST = [0, 1]
- # Example: export TEST_LEVEL_LIST=0,1
- TEST_LEVEL_LIST_STR = 'TEST_LEVEL_LIST'
-
-
- def test_level_list():
-     global TEST_LEVEL_LIST
-     if TEST_LEVEL_LIST_STR in os.environ:
-         TEST_LEVEL_LIST = [int(x) for x in os.environ[TEST_LEVEL_LIST_STR].split(',')]
-
-     return TEST_LEVEL_LIST
tests/vlm/__init__.py DELETED
@@ -1 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
tests/vlm/test_vlmeval.py DELETED
@@ -1,102 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- from dotenv import dotenv_values
-
- from tests.utils import test_level_list
-
- env = dotenv_values('.env')
- import unittest
-
- from evalscope.run import run_task
- from evalscope.summarizer import Summarizer
- from evalscope.utils.import_utils import is_module_installed
- from evalscope.utils.logger import get_logger
-
- logger = get_logger()
-
-
- class TestVLMEval(unittest.TestCase):
-
-     def setUp(self) -> None:
-         self._check_env('vlmeval')
-
-     def tearDown(self) -> None:
-         pass
-
-     @staticmethod
-     def _check_env(module_name: str):
-         if is_module_installed(module_name):
-             logger.info(f'{module_name} is installed.')
-         else:
-             raise ModuleNotFoundError(f'run: pip install {module_name}')
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_vlm_eval_local(self):
-         task_cfg = {
-             'eval_backend': 'VLMEvalKit',
-             'eval_config': {
-                 'data': ['SEEDBench_IMG', 'ChartQA_TEST'],
-                 'limit': 20,
-                 'mode': 'all',
-                 'model': [{
-                     'name': 'qwen-vl-chat',
-                     'model_path': '../models/Qwen-VL-Chat'
-                 }],  # model name for VLMEval config
-                 'nproc': 1,
-                 'reuse': True,
-             },
-             'work_dir': 'outputs',
-             'use_cache': 'outputs/20241216_142838'
-         }
-
-         logger.info(f'>> Start to run task: {task_cfg}')
-
-         run_task(task_cfg)
-
-         logger.info('>> Start to get the report with summarizer ...')
-         report_list = Summarizer.get_report_from_cfg(task_cfg)
-         logger.info(f'\n>>The report list: {report_list}')
-
-         assert len(report_list) > 0, f'Failed to get report list: {report_list}'
-
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_vlm_api(self):
-         task_cfg = {
-             'eval_backend': 'VLMEvalKit',
-             'eval_config': {
-                 'data': [
-                     # 'SEEDBench_IMG',
-                     # 'ChartQA_TEST',
-                     'MMDU'
-                 ],
-                 'limit': 5,
-                 'mode': 'all',
-                 'model': [
-                     {'api_base': 'https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions',
-                      'key': env.get('DASHSCOPE_API_KEY'),
-                      'name': 'CustomAPIModel',
-                      'temperature': 0.0,
-                      'type': 'qwen2.5-vl-7b-instruct',
-                      'img_size': -1,
-                      'video_llm': False,
-                      'max_tokens': 512,}
-                 ],
-                 'nproc': 5,
-                 'reuse': False,
-             },
-             'work_dir': 'outputs',
-             # 'use_cache': 'outputs/20241216_142838'
-         }
-
-         logger.info(f'>> Start to run task: {task_cfg}')
-
-         run_task(task_cfg)
-
-         logger.info('>> Start to get the report with summarizer ...')
-         report_list = Summarizer.get_report_from_cfg(task_cfg)
-         logger.info(f'\n>>The report list: {report_list}')
-
-         assert len(report_list) > 0, f'Failed to get report list: {report_list}'
-
- if __name__ == '__main__':
-     unittest.main(buffer=False)