evalscope 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (324)
  1. evalscope/api/benchmark/__init__.py +9 -1
  2. evalscope/api/benchmark/adapters/__init__.py +4 -0
  3. evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
  4. evalscope/api/benchmark/adapters/default_data_adapter.py +75 -4
  5. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  6. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  7. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  8. evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
  9. evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
  10. evalscope/api/benchmark/benchmark.py +85 -2
  11. evalscope/api/benchmark/meta.py +10 -1
  12. evalscope/api/dataset/dataset.py +27 -6
  13. evalscope/api/dataset/loader.py +8 -3
  14. evalscope/api/evaluator/cache.py +31 -4
  15. evalscope/api/evaluator/evaluator.py +5 -0
  16. evalscope/api/evaluator/state.py +17 -1
  17. evalscope/api/messages/__init__.py +1 -0
  18. evalscope/api/messages/chat_message.py +52 -2
  19. evalscope/api/metric/__init__.py +1 -1
  20. evalscope/api/metric/metric.py +6 -1
  21. evalscope/api/metric/scorer.py +15 -7
  22. evalscope/api/mixin/__init__.py +1 -1
  23. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  24. evalscope/api/mixin/sandbox_mixin.py +182 -0
  25. evalscope/api/model/generate_config.py +10 -6
  26. evalscope/api/model/model.py +5 -2
  27. evalscope/api/tool/tool_info.py +1 -1
  28. evalscope/app/app.py +3 -0
  29. evalscope/app/ui/multi_model.py +6 -1
  30. evalscope/app/ui/single_model.py +11 -5
  31. evalscope/app/utils/data_utils.py +8 -7
  32. evalscope/app/utils/env_utils.py +12 -0
  33. evalscope/app/utils/text_utils.py +14 -12
  34. evalscope/app/utils/visualization.py +2 -2
  35. evalscope/arguments.py +8 -4
  36. evalscope/backend/opencompass/backend_manager.py +0 -2
  37. evalscope/backend/rag_eval/utils/embedding.py +9 -1
  38. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  39. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  40. evalscope/benchmarks/aime/aime24_adapter.py +5 -0
  41. evalscope/benchmarks/aime/aime25_adapter.py +136 -1
  42. evalscope/benchmarks/aime/grader.py +307 -0
  43. evalscope/benchmarks/aime/math_normalize.py +189 -0
  44. evalscope/benchmarks/amc/amc_adapter.py +51 -0
  45. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -0
  46. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  47. evalscope/benchmarks/bfcl/{bfcl_adapter.py → v3/bfcl_v3_adapter.py} +131 -19
  48. evalscope/benchmarks/bfcl/{generation.py → v3/generation.py} +9 -9
  49. evalscope/benchmarks/bfcl/v3/utils.py +23 -0
  50. evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
  51. evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
  52. evalscope/benchmarks/bfcl/v4/utils.py +410 -0
  53. evalscope/benchmarks/biomix_qa/__init__.py +0 -0
  54. evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
  55. evalscope/benchmarks/blink/__init__.py +0 -0
  56. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  57. evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
  58. evalscope/benchmarks/chartqa/__init__.py +0 -0
  59. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  60. evalscope/benchmarks/chartqa/utils.py +38 -0
  61. evalscope/benchmarks/coin_flip/__init__.py +0 -0
  62. evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
  63. evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
  64. evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
  65. evalscope/benchmarks/competition_math/competition_math_adapter.py +5 -0
  66. evalscope/benchmarks/data_collection/data_collection_adapter.py +24 -19
  67. evalscope/benchmarks/docvqa/__init__.py +0 -0
  68. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  69. evalscope/benchmarks/drivelology/__init__.py +0 -0
  70. evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
  71. evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
  72. evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
  73. evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
  74. evalscope/benchmarks/drop/drop_adapter.py +15 -44
  75. evalscope/benchmarks/drop/utils.py +97 -0
  76. evalscope/benchmarks/frames/frames_adapter.py +2 -1
  77. evalscope/benchmarks/general_arena/general_arena_adapter.py +7 -2
  78. evalscope/benchmarks/general_arena/utils.py +2 -1
  79. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
  80. evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
  81. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +25 -9
  82. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  83. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
  84. evalscope/benchmarks/halu_eval/__init__.py +0 -0
  85. evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
  86. evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
  87. evalscope/benchmarks/healthbench/__init__.py +0 -0
  88. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  89. evalscope/benchmarks/healthbench/utils.py +102 -0
  90. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  91. evalscope/benchmarks/humaneval/humaneval_adapter.py +24 -52
  92. evalscope/benchmarks/humaneval/utils.py +235 -0
  93. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  94. evalscope/benchmarks/image_edit/__init__.py +0 -0
  95. evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
  96. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  97. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  98. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  99. evalscope/benchmarks/infovqa/__init__.py +0 -0
  100. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  101. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  102. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +66 -54
  103. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  104. evalscope/benchmarks/logi_qa/__int__.py +0 -0
  105. evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
  106. evalscope/benchmarks/math_500/math_500_adapter.py +5 -1
  107. evalscope/benchmarks/math_qa/__init__.py +0 -0
  108. evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
  109. evalscope/benchmarks/math_verse/__init__.py +0 -0
  110. evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
  111. evalscope/benchmarks/math_vision/__init__.py +0 -0
  112. evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
  113. evalscope/benchmarks/math_vista/__init__.py +0 -0
  114. evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
  115. evalscope/benchmarks/med_mcqa/__init__.py +0 -0
  116. evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
  117. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  118. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
  119. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  120. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  121. evalscope/benchmarks/mm_star/__init__.py +0 -0
  122. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  123. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +1 -1
  124. evalscope/benchmarks/mmmu/__init__.py +0 -0
  125. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  126. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  127. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
  128. evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
  129. evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
  130. evalscope/benchmarks/multi_if/__init__.py +0 -0
  131. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  132. evalscope/benchmarks/multi_if/metrics.py +120 -0
  133. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  134. evalscope/benchmarks/music_trivia/__init__.py +0 -0
  135. evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
  136. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +7 -6
  137. evalscope/benchmarks/ner/__init__.py +0 -0
  138. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  139. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  140. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  141. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  142. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  143. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  144. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  145. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  146. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  147. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  148. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  149. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  150. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  151. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  152. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  153. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  154. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  155. evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
  156. evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
  157. evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
  158. evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
  159. evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
  160. evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  161. evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
  162. evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
  163. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  164. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  165. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  166. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
  167. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
  168. evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
  169. evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
  170. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  171. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  172. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  173. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  174. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  175. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  176. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  177. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  178. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  179. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  180. evalscope/benchmarks/piqa/__init__.py +0 -0
  181. evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
  182. evalscope/benchmarks/poly_math/__init__.py +0 -0
  183. evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
  184. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  185. evalscope/benchmarks/pope/__init__.py +0 -0
  186. evalscope/benchmarks/pope/pope_adapter.py +112 -0
  187. evalscope/benchmarks/process_bench/process_bench_adapter.py +1 -0
  188. evalscope/benchmarks/pumed_qa/__init__.py +0 -0
  189. evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
  190. evalscope/benchmarks/qasc/__init__.py +0 -0
  191. evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
  192. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  193. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  194. evalscope/benchmarks/sciq/__init__.py +0 -0
  195. evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
  196. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  197. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  198. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +1 -1
  199. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  200. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  201. evalscope/benchmarks/siqa/__init__.py +0 -0
  202. evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
  203. evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
  204. evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
  205. evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
  206. evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
  207. evalscope/benchmarks/tau_bench/{generation.py → tau_bench/generation.py} +1 -1
  208. evalscope/benchmarks/tau_bench/{tau_bench_adapter.py → tau_bench/tau_bench_adapter.py} +29 -29
  209. evalscope/benchmarks/text2image/__init__.py +0 -0
  210. evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
  211. evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
  212. evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
  213. evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
  214. evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
  215. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +3 -3
  216. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
  217. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  218. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  219. evalscope/benchmarks/wmt/__init__.py +0 -0
  220. evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
  221. evalscope/benchmarks/zerobench/__init__.py +0 -0
  222. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  223. evalscope/cli/start_app.py +7 -1
  224. evalscope/cli/start_perf.py +7 -1
  225. evalscope/config.py +103 -18
  226. evalscope/constants.py +18 -0
  227. evalscope/evaluator/evaluator.py +138 -82
  228. evalscope/metrics/bert_score/__init__.py +0 -0
  229. evalscope/metrics/bert_score/scorer.py +338 -0
  230. evalscope/metrics/bert_score/utils.py +697 -0
  231. evalscope/metrics/llm_judge.py +19 -7
  232. evalscope/metrics/math_parser.py +14 -0
  233. evalscope/metrics/metric.py +317 -13
  234. evalscope/metrics/metrics.py +37 -0
  235. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  236. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  237. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  238. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  239. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  240. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  241. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  242. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  243. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  244. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  245. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  246. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  247. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  248. evalscope/models/image_edit_model.py +125 -0
  249. evalscope/models/model_apis.py +22 -0
  250. evalscope/models/openai_compatible.py +21 -0
  251. evalscope/models/text2image_model.py +2 -2
  252. evalscope/models/utils/openai.py +16 -6
  253. evalscope/perf/arguments.py +26 -4
  254. evalscope/perf/benchmark.py +76 -89
  255. evalscope/perf/http_client.py +31 -16
  256. evalscope/perf/main.py +15 -2
  257. evalscope/perf/plugin/api/base.py +9 -7
  258. evalscope/perf/plugin/api/custom_api.py +13 -58
  259. evalscope/perf/plugin/api/default_api.py +188 -79
  260. evalscope/perf/plugin/api/openai_api.py +85 -20
  261. evalscope/perf/plugin/datasets/base.py +21 -0
  262. evalscope/perf/plugin/datasets/custom.py +2 -3
  263. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  264. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  265. evalscope/perf/plugin/datasets/line_by_line.py +2 -3
  266. evalscope/perf/plugin/datasets/longalpaca.py +2 -3
  267. evalscope/perf/plugin/datasets/openqa.py +2 -4
  268. evalscope/perf/plugin/datasets/random_dataset.py +1 -3
  269. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  270. evalscope/perf/utils/benchmark_util.py +43 -27
  271. evalscope/perf/utils/db_util.py +14 -19
  272. evalscope/perf/utils/local_server.py +3 -44
  273. evalscope/perf/utils/log_utils.py +21 -6
  274. evalscope/report/__init__.py +13 -3
  275. evalscope/report/combinator.py +91 -20
  276. evalscope/report/generator.py +8 -87
  277. evalscope/report/report.py +8 -4
  278. evalscope/run.py +13 -5
  279. evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  280. evalscope/utils/argument_utils.py +1 -1
  281. evalscope/utils/chat_service.py +1 -1
  282. evalscope/utils/function_utils.py +249 -12
  283. evalscope/utils/import_utils.py +73 -1
  284. evalscope/utils/io_utils.py +132 -7
  285. evalscope/utils/json_schema.py +25 -2
  286. evalscope/utils/logger.py +69 -18
  287. evalscope/utils/model_utils.py +4 -3
  288. evalscope/utils/multi_choices.py +39 -7
  289. evalscope/utils/ner.py +377 -0
  290. evalscope/version.py +2 -2
  291. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/METADATA +252 -408
  292. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/RECORD +290 -154
  293. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
  294. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
  295. evalscope/api/mixin/dataset_mixin.py +0 -105
  296. evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
  297. tests/__init__.py +0 -1
  298. tests/aigc/__init__.py +0 -1
  299. tests/aigc/test_t2i.py +0 -142
  300. tests/benchmark/__init__.py +0 -1
  301. tests/benchmark/test_eval.py +0 -386
  302. tests/cli/__init__.py +0 -1
  303. tests/cli/test_all.py +0 -229
  304. tests/cli/test_collection.py +0 -96
  305. tests/cli/test_custom.py +0 -268
  306. tests/perf/__init__.py +0 -1
  307. tests/perf/test_perf.py +0 -176
  308. tests/rag/test_clip_benchmark.py +0 -90
  309. tests/rag/test_mteb.py +0 -213
  310. tests/rag/test_ragas.py +0 -128
  311. tests/swift/__init__.py +0 -1
  312. tests/swift/test_run_swift_eval.py +0 -146
  313. tests/swift/test_run_swift_vlm_eval.py +0 -128
  314. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  315. tests/test_run_all.py +0 -12
  316. tests/utils.py +0 -13
  317. tests/vlm/__init__.py +0 -1
  318. tests/vlm/test_vlmeval.py +0 -102
  319. /evalscope/benchmarks/{aigc → aa_lcr}/__init__.py +0 -0
  320. /evalscope/benchmarks/{aigc/i2i → ai2d}/__init__.py +0 -0
  321. /evalscope/benchmarks/{aigc/t2i → amc}/__init__.py +0 -0
  322. {tests/rag → evalscope/benchmarks/bfcl/v3}/__init__.py +0 -0
  323. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
  324. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
evalscope/benchmarks/humaneval/utils.py
@@ -0,0 +1,235 @@
+ import contextlib
+ import faulthandler
+ import io
+ import multiprocessing
+ import os
+ import platform
+ import signal
+ import tempfile
+ from typing import Dict, Optional
+
+
+ def unsafe_execute(problem: Dict, completion: str, timeout: float, result):
+     with create_tempdir():
+
+         # These system calls are needed when cleaning up tempdir.
+         import os
+         import shutil
+
+         rmtree = shutil.rmtree
+         rmdir = os.rmdir
+         chdir = os.chdir
+
+         # Disable functionalities that can make destructive changes to the test.
+         reliability_guard()
+
+         # Construct the check program and run it.
+         check_program = (
+             problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
+         )
+
+         try:
+             exec_globals = {}
+             with swallow_io():
+                 with time_limit(timeout):
+                     # WARNING
+                     # This program exists to execute untrusted model-generated code. Although
+                     # it is highly unlikely that model-generated code will do something overtly
+                     # malicious in response to this test suite, model-generated code may act
+                     # destructively due to a lack of model capability or alignment.
+                     # Users are strongly encouraged to sandbox this evaluation suite so that it
+                     # does not perform destructive actions on their host or network. For more
+                     # information on how OpenAI sandboxes its code, see the accompanying paper.
+                     # Once you have read this disclaimer and taken appropriate precautions,
+                     # uncomment the following line and proceed at your own risk:
+                     exec(check_program, exec_globals)
+             result.append('passed')
+         except TimeoutException:
+             result.append('timed out')
+         except BaseException as e:
+             result.append(f'failed: {e}')
+
+         # Needed for cleaning up.
+         shutil.rmtree = rmtree
+         os.rmdir = rmdir
+         os.chdir = chdir
+
+
+ def check_correctness(problem: Dict, completion: str, timeout: float, completion_id: Optional[int] = None) -> Dict:
+     """
+     Evaluates the functional correctness of a completion by running the test
+     suite provided in the problem.
+
+     :param completion_id: an optional completion ID so we can match
+         the results later even if execution finishes asynchronously.
+     """
+
+     manager = multiprocessing.Manager()
+     result = manager.list()
+
+     p = multiprocessing.Process(target=unsafe_execute, args=(problem, completion, timeout, result))
+     p.start()
+     p.join(timeout=timeout + 1)
+     if p.is_alive():
+         p.kill()
+
+     if not result:
+         result.append('timed out')
+
+     return dict(
+         task_id=problem['task_id'],
+         passed=result[0] == 'passed',
+         result=result[0],
+         completion_id=completion_id,
+     )
+
+
+ @contextlib.contextmanager
+ def time_limit(seconds: float):
+
+     def signal_handler(signum, frame):
+         raise TimeoutException('Timed out!')
+
+     signal.setitimer(signal.ITIMER_REAL, seconds)
+     signal.signal(signal.SIGALRM, signal_handler)
+     try:
+         yield
+     finally:
+         signal.setitimer(signal.ITIMER_REAL, 0)
+
+
+ @contextlib.contextmanager
+ def swallow_io():
+     stream = WriteOnlyStringIO()
+     with contextlib.redirect_stdout(stream):
+         with contextlib.redirect_stderr(stream):
+             with redirect_stdin(stream):
+                 yield
+
+
+ @contextlib.contextmanager
+ def create_tempdir():
+     with tempfile.TemporaryDirectory() as dirname:
+         with chdir(dirname):
+             yield dirname
+
+
+ class TimeoutException(Exception):
+     pass
+
+
+ class WriteOnlyStringIO(io.StringIO):
+     """StringIO that throws an exception when it's read from"""
+
+     def read(self, *args, **kwargs):
+         raise IOError
+
+     def readline(self, *args, **kwargs):
+         raise IOError
+
+     def readlines(self, *args, **kwargs):
+         raise IOError
+
+     def readable(self, *args, **kwargs):
+         """Returns True if the IO object can be read."""
+         return False
+
+
+ class redirect_stdin(contextlib._RedirectStream):  # type: ignore
+     _stream = 'stdin'
+
+
+ @contextlib.contextmanager
+ def chdir(root):
+     if root == '.':
+         yield
+         return
+     cwd = os.getcwd()
+     os.chdir(root)
+     try:
+         yield
+     except BaseException as exc:
+         raise exc
+     finally:
+         os.chdir(cwd)
+
+
+ def reliability_guard(maximum_memory_bytes: Optional[int] = None):
+     """
+     This disables various destructive functions and prevents the generated code
+     from interfering with the test (e.g. fork bomb, killing other processes,
+     removing filesystem files, etc.)
+
+     WARNING
+     This function is NOT a security sandbox. Untrusted code, including, model-
+     generated code, should not be blindly executed outside of one. See the
+     Codex paper for more information about OpenAI's code sandbox, and proceed
+     with caution.
+     """
+
+     if maximum_memory_bytes is not None:
+         import resource
+
+         resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
+         resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
+         if not platform.uname().system == 'Darwin':
+             resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
+
+     faulthandler.disable()
+
+     import builtins
+
+     builtins.exit = None
+     builtins.quit = None
+
+     import os
+
+     os.environ['OMP_NUM_THREADS'] = '1'
+
+     os.kill = None
+     os.system = None
+     os.putenv = None
+     os.remove = None
+     os.removedirs = None
+     os.rmdir = None
+     os.fchdir = None
+     os.setuid = None
+     os.fork = None
+     os.forkpty = None
+     os.killpg = None
+     os.rename = None
+     os.renames = None
+     os.truncate = None
+     os.replace = None
+     os.unlink = None
+     os.fchmod = None
+     os.fchown = None
+     os.chmod = None
+     os.chown = None
+     os.chroot = None
+     os.fchdir = None
+     os.lchflags = None
+     os.lchmod = None
+     os.lchown = None
+     os.getcwd = None
+     os.chdir = None
+
+     import shutil
+
+     shutil.rmtree = None
+     shutil.move = None
+     shutil.chown = None
+
+     import subprocess
+
+     subprocess.Popen = None  # type: ignore
+
+     __builtins__['help'] = None
+
+     import sys
+
+     sys.modules['ipdb'] = None
+     sys.modules['joblib'] = None
+     sys.modules['resource'] = None
+     sys.modules['psutil'] = None
+     sys.modules['tkinter'] = None
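
Note: the file above is evalscope's vendored HumanEval execution harness. check_correctness runs the candidate program in a subprocess with reliability_guard applied and output swallowed. A minimal sketch of driving it directly follows; the problem record is hand-written here, with field names (task_id, prompt, entry_point, test) taken from what unsafe_execute reads above, and everything else illustrative. The SIGALRM-based time_limit makes this Unix-only.

# Hedged sketch (not part of the package): exercising check_correctness
# with a hand-written HumanEval-style record.
from evalscope.benchmarks.humaneval.utils import check_correctness  # module path per the file list above

problem = {
    'task_id': 'demo/0',
    'prompt': 'def add(a, b):\n',
    'entry_point': 'add',
    'test': ('def check(candidate):\n'
             '    assert candidate(1, 2) == 3\n'
             '    assert candidate(-1, 1) == 0\n'),
}
completion = '    return a + b\n'  # stand-in for model output

if __name__ == '__main__':  # guard required: check_correctness spawns a subprocess
    outcome = check_correctness(problem, completion, timeout=3.0)
    print(outcome['passed'], outcome['result'])  # True passed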
evalscope/benchmarks/ifeval/instructions_util.py
@@ -14,7 +14,6 @@
  """Utility library of instructions."""

  import functools
- import immutabledict
  import nltk
  import os
  import random
@@ -1551,7 +1550,7 @@ WORD_LIST = [
  ] # pylint: disable=line-too-long

  # ISO 639-1 codes to language names.
- LANGUAGE_CODES = immutabledict.immutabledict({
+ LANGUAGE_CODES = {
      'en': 'English',
      'es': 'Spanish',
      'pt': 'Portuguese',
@@ -1582,7 +1581,7 @@ LANGUAGE_CODES = immutabledict.immutabledict({
      'pa': 'Punjabi',
      'ml': 'Malayalam',
      'fi': 'Finnish',
- })
+ }

  _ALPHABETS = '([A-Za-z])'
  _PREFIXES = '(Mr|St|Mrs|Ms|Dr)[.]'
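
The hunks above drop the third-party immutabledict dependency and store LANGUAGE_CODES in a plain dict: lookups behave identically, only the read-only guarantee is lost. If that guarantee were wanted without the dependency, the standard library's types.MappingProxyType would be one option; a minimal sketch, not what the package does:

from types import MappingProxyType

# Read-only view over a plain dict: lookups are unchanged, but item
# assignment raises TypeError instead of mutating the mapping.
LANGUAGE_CODES = MappingProxyType({
    'en': 'English',
    'es': 'Spanish',
    'pt': 'Portuguese',
})

assert LANGUAGE_CODES['en'] == 'English'
try:
    LANGUAGE_CODES['xx'] = 'Example'  # type: ignore[index]
except TypeError:
    print('mapping is read-only')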
evalscope/benchmarks/image_edit/__init__.py: file without changes
evalscope/benchmarks/image_edit/gedit/__init__.py: file without changes
evalscope/benchmarks/image_edit/gedit/gedit_adapter.py
@@ -0,0 +1,138 @@
+ # Copyright (c) Alibaba, Inc. and its affiliates.
+ import copy
+ import os
+ from typing import Any, Dict, List
+
+ from evalscope.api.benchmark import BenchmarkMeta, ImageEditAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.evaluator.state import TaskState
+ from evalscope.api.messages import ChatMessage, ChatMessageUser, Content, ContentImage, ContentText
+ from evalscope.api.metric.scorer import Score
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import FileConstants, Tags
+ from evalscope.utils.io_utils import bytes_to_base64
+ from evalscope.utils.logger import get_logger
+
+ logger = get_logger()
+
+ SUBSET_LIST = [
+     'background_change', 'color_alter', 'material_alter', 'motion_change', 'ps_human', 'style_change', 'subject-add',
+     'subject-remove', 'subject-replace', 'text_change', 'tone_transfer'
+ ]
+
+ LANGUAGE_LIST = ['en', 'cn']
+
+
+ @register_benchmark(
+     BenchmarkMeta(
+         name='gedit',
+         pretty_name='GEdit-Bench',
+         dataset_id='stepfun-ai/GEdit-Bench',
+         description='GEdit-Bench Image Editing Benchmark, grounded in real-world '
+         'usages is developed to support more authentic and '
+         'comprehensive evaluation of image editing models.',
+         tags=[Tags.IMAGE_EDITING],
+         subset_list=SUBSET_LIST,
+         metric_list=['Semantic Consistency', 'Perceptual Similarity'],
+         few_shot_num=0,
+         train_split=None,
+         eval_split='train',
+         extra_params={'language': f'# language of the instruction, choose from {LANGUAGE_LIST}, default to `en`'}
+     )
+ )
+ class GEditAdapter(ImageEditAdapter):
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+         self.language = self.extra_params.get('language', 'en')
+         if self.language not in LANGUAGE_LIST:
+             logger.warning(f"Invalid language '{self.language}', fallback to 'en'")
+             self.language = 'en'
+         self.reformat_subset = True
+         self._use_llm_judge = True
+
+         self.load_prompt()
+
+     def load_prompt(self):
+         from . import vie_prompts
+
+         self.context = vie_prompts._context_no_delimit
+         self.SC_prompt = '\n'.join([
+             self.context, vie_prompts._prompts_0shot_two_image_edit_rule, vie_prompts._prompts_0shot_tie_rule_SC
+         ])
+         self.PQ_prompt = '\n'.join([self.context, vie_prompts._prompts_0shot_rule_PQ])
+
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         record = copy.deepcopy(record)
+
+         # Process instruction and image
+         instruction = record['instruction']
+         image_bytes = record['input_image']['bytes']
+         input_image = bytes_to_base64(image_bytes, format='png', add_header=True)
+         record['input_image'] = input_image
+         record[FileConstants.ID] = record['key']
+         del record['input_image_raw']
+
+         text_content = ContentText(text=instruction)
+         image_content = ContentImage(image=input_image)
+
+         messages: List[ChatMessage] = [
+             ChatMessageUser(content=[text_content, image_content]),
+         ]
+
+         return Sample(input=messages, subset_key=record['task_type'], metadata=record)
+
+     def sample_filter(self, sample: Sample) -> bool:
+         language = sample.metadata.get('instruction_language', 'en')
+         return super().sample_filter(sample) and language == self.language
+
+     def llm_match_score(self, original_prediction, filtered_prediction, reference, task_state: TaskState) -> Score:
+         import math
+
+         from .utils import mllm_output_to_dict
+
+         metadata = task_state.metadata
+         text_prompt = metadata['instruction']
+         input_image = metadata['input_image']  # base64 image
+         edited_image = metadata[FileConstants.IMAGE_PATH]  # local image path
+         _SC_prompt = self.SC_prompt.replace('<instruction>', text_prompt)
+
+         # Initialize the score object with prediction details
+         score = Score(
+             extracted_prediction=edited_image,
+             prediction=edited_image,
+         )
+
+         # Build prompts
+         SC_prompt_final = [
+             ChatMessageUser(
+                 content=[
+                     ContentImage(image=input_image),
+                     ContentImage(image=edited_image),
+                     ContentText(text=_SC_prompt)
+                 ]
+             )
+         ]
+         PQ_prompt_final = [
+             ChatMessageUser(content=[ContentImage(image=edited_image),
+                                      ContentText(text=self.PQ_prompt)])
+         ]
+
+         guess_if_cannot_parse = True
+         result_SC = self.llm_judge.judge(messages=SC_prompt_final)
+         result_PQ = self.llm_judge.judge(messages=PQ_prompt_final)
+         SC_dict = mllm_output_to_dict(result_SC, give_up_parsing=guess_if_cannot_parse)
+         PQ_dict = mllm_output_to_dict(result_PQ, give_up_parsing=guess_if_cannot_parse)
+
+         SC_score = min(SC_dict['score'])
+         PQ_score = min(PQ_dict['score'])
+         O_score = math.sqrt(SC_score * PQ_score)
+
+         score.value = {'Semantic Consistency': SC_score, 'Perceptual Quality': PQ_score, 'Overall': O_score}
+         score.main_score_name = 'Overall'
+         score.metadata = {
+             'SC_dict': SC_dict,
+             'PQ_dict': PQ_dict,
+         }
+         return score
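
For orientation, a hedged sketch of invoking this new benchmark through evalscope's Python entry point follows. The dataset name ('gedit') and the extra_params key ('language') come from the registration above; the model identifier is a placeholder, and the exact TaskConfig fields accepted (including any judge configuration) depend on the installed version.

from evalscope import TaskConfig, run_task

# Sketch only: 'gedit' and extra_params={'language': ...} follow the
# @register_benchmark metadata above; the model name is a placeholder.
task_cfg = TaskConfig(
    model='my-image-edit-model',
    datasets=['gedit'],
    dataset_args={
        'gedit': {
            'extra_params': {'language': 'en'},  # or 'cn', per LANGUAGE_LIST
        },
    },
)

run_task(task_cfg=task_cfg)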