evalscope 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (324)
  1. evalscope/api/benchmark/__init__.py +9 -1
  2. evalscope/api/benchmark/adapters/__init__.py +4 -0
  3. evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
  4. evalscope/api/benchmark/adapters/default_data_adapter.py +75 -4
  5. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  6. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  7. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  8. evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
  9. evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
  10. evalscope/api/benchmark/benchmark.py +85 -2
  11. evalscope/api/benchmark/meta.py +10 -1
  12. evalscope/api/dataset/dataset.py +27 -6
  13. evalscope/api/dataset/loader.py +8 -3
  14. evalscope/api/evaluator/cache.py +31 -4
  15. evalscope/api/evaluator/evaluator.py +5 -0
  16. evalscope/api/evaluator/state.py +17 -1
  17. evalscope/api/messages/__init__.py +1 -0
  18. evalscope/api/messages/chat_message.py +52 -2
  19. evalscope/api/metric/__init__.py +1 -1
  20. evalscope/api/metric/metric.py +6 -1
  21. evalscope/api/metric/scorer.py +15 -7
  22. evalscope/api/mixin/__init__.py +1 -1
  23. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  24. evalscope/api/mixin/sandbox_mixin.py +182 -0
  25. evalscope/api/model/generate_config.py +10 -6
  26. evalscope/api/model/model.py +5 -2
  27. evalscope/api/tool/tool_info.py +1 -1
  28. evalscope/app/app.py +3 -0
  29. evalscope/app/ui/multi_model.py +6 -1
  30. evalscope/app/ui/single_model.py +11 -5
  31. evalscope/app/utils/data_utils.py +8 -7
  32. evalscope/app/utils/env_utils.py +12 -0
  33. evalscope/app/utils/text_utils.py +14 -12
  34. evalscope/app/utils/visualization.py +2 -2
  35. evalscope/arguments.py +8 -4
  36. evalscope/backend/opencompass/backend_manager.py +0 -2
  37. evalscope/backend/rag_eval/utils/embedding.py +9 -1
  38. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  39. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  40. evalscope/benchmarks/aime/aime24_adapter.py +5 -0
  41. evalscope/benchmarks/aime/aime25_adapter.py +136 -1
  42. evalscope/benchmarks/aime/grader.py +307 -0
  43. evalscope/benchmarks/aime/math_normalize.py +189 -0
  44. evalscope/benchmarks/amc/amc_adapter.py +51 -0
  45. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -0
  46. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  47. evalscope/benchmarks/bfcl/{bfcl_adapter.py → v3/bfcl_v3_adapter.py} +131 -19
  48. evalscope/benchmarks/bfcl/{generation.py → v3/generation.py} +9 -9
  49. evalscope/benchmarks/bfcl/v3/utils.py +23 -0
  50. evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
  51. evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
  52. evalscope/benchmarks/bfcl/v4/utils.py +410 -0
  53. evalscope/benchmarks/biomix_qa/__init__.py +0 -0
  54. evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
  55. evalscope/benchmarks/blink/__init__.py +0 -0
  56. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  57. evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
  58. evalscope/benchmarks/chartqa/__init__.py +0 -0
  59. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  60. evalscope/benchmarks/chartqa/utils.py +38 -0
  61. evalscope/benchmarks/coin_flip/__init__.py +0 -0
  62. evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
  63. evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
  64. evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
  65. evalscope/benchmarks/competition_math/competition_math_adapter.py +5 -0
  66. evalscope/benchmarks/data_collection/data_collection_adapter.py +24 -19
  67. evalscope/benchmarks/docvqa/__init__.py +0 -0
  68. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  69. evalscope/benchmarks/drivelology/__init__.py +0 -0
  70. evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
  71. evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
  72. evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
  73. evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
  74. evalscope/benchmarks/drop/drop_adapter.py +15 -44
  75. evalscope/benchmarks/drop/utils.py +97 -0
  76. evalscope/benchmarks/frames/frames_adapter.py +2 -1
  77. evalscope/benchmarks/general_arena/general_arena_adapter.py +7 -2
  78. evalscope/benchmarks/general_arena/utils.py +2 -1
  79. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
  80. evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
  81. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +25 -9
  82. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  83. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
  84. evalscope/benchmarks/halu_eval/__init__.py +0 -0
  85. evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
  86. evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
  87. evalscope/benchmarks/healthbench/__init__.py +0 -0
  88. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  89. evalscope/benchmarks/healthbench/utils.py +102 -0
  90. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  91. evalscope/benchmarks/humaneval/humaneval_adapter.py +24 -52
  92. evalscope/benchmarks/humaneval/utils.py +235 -0
  93. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  94. evalscope/benchmarks/image_edit/__init__.py +0 -0
  95. evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
  96. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  97. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  98. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  99. evalscope/benchmarks/infovqa/__init__.py +0 -0
  100. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  101. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  102. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +66 -54
  103. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  104. evalscope/benchmarks/logi_qa/__int__.py +0 -0
  105. evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
  106. evalscope/benchmarks/math_500/math_500_adapter.py +5 -1
  107. evalscope/benchmarks/math_qa/__init__.py +0 -0
  108. evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
  109. evalscope/benchmarks/math_verse/__init__.py +0 -0
  110. evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
  111. evalscope/benchmarks/math_vision/__init__.py +0 -0
  112. evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
  113. evalscope/benchmarks/math_vista/__init__.py +0 -0
  114. evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
  115. evalscope/benchmarks/med_mcqa/__init__.py +0 -0
  116. evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
  117. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  118. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
  119. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  120. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  121. evalscope/benchmarks/mm_star/__init__.py +0 -0
  122. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  123. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +1 -1
  124. evalscope/benchmarks/mmmu/__init__.py +0 -0
  125. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  126. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  127. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
  128. evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
  129. evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
  130. evalscope/benchmarks/multi_if/__init__.py +0 -0
  131. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  132. evalscope/benchmarks/multi_if/metrics.py +120 -0
  133. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  134. evalscope/benchmarks/music_trivia/__init__.py +0 -0
  135. evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
  136. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +7 -6
  137. evalscope/benchmarks/ner/__init__.py +0 -0
  138. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  139. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  140. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  141. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  142. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  143. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  144. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  145. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  146. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  147. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  148. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  149. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  150. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  151. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  152. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  153. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  154. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  155. evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
  156. evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
  157. evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
  158. evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
  159. evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
  160. evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  161. evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
  162. evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
  163. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  164. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  165. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  166. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
  167. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
  168. evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
  169. evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
  170. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  171. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  172. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  173. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  174. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  175. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  176. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  177. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  178. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  179. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  180. evalscope/benchmarks/piqa/__init__.py +0 -0
  181. evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
  182. evalscope/benchmarks/poly_math/__init__.py +0 -0
  183. evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
  184. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  185. evalscope/benchmarks/pope/__init__.py +0 -0
  186. evalscope/benchmarks/pope/pope_adapter.py +112 -0
  187. evalscope/benchmarks/process_bench/process_bench_adapter.py +1 -0
  188. evalscope/benchmarks/pumed_qa/__init__.py +0 -0
  189. evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
  190. evalscope/benchmarks/qasc/__init__.py +0 -0
  191. evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
  192. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  193. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  194. evalscope/benchmarks/sciq/__init__.py +0 -0
  195. evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
  196. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  197. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  198. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +1 -1
  199. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  200. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  201. evalscope/benchmarks/siqa/__init__.py +0 -0
  202. evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
  203. evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
  204. evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
  205. evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
  206. evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
  207. evalscope/benchmarks/tau_bench/{generation.py → tau_bench/generation.py} +1 -1
  208. evalscope/benchmarks/tau_bench/{tau_bench_adapter.py → tau_bench/tau_bench_adapter.py} +29 -29
  209. evalscope/benchmarks/text2image/__init__.py +0 -0
  210. evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
  211. evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
  212. evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
  213. evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
  214. evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
  215. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +3 -3
  216. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
  217. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  218. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  219. evalscope/benchmarks/wmt/__init__.py +0 -0
  220. evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
  221. evalscope/benchmarks/zerobench/__init__.py +0 -0
  222. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  223. evalscope/cli/start_app.py +7 -1
  224. evalscope/cli/start_perf.py +7 -1
  225. evalscope/config.py +103 -18
  226. evalscope/constants.py +18 -0
  227. evalscope/evaluator/evaluator.py +138 -82
  228. evalscope/metrics/bert_score/__init__.py +0 -0
  229. evalscope/metrics/bert_score/scorer.py +338 -0
  230. evalscope/metrics/bert_score/utils.py +697 -0
  231. evalscope/metrics/llm_judge.py +19 -7
  232. evalscope/metrics/math_parser.py +14 -0
  233. evalscope/metrics/metric.py +317 -13
  234. evalscope/metrics/metrics.py +37 -0
  235. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  236. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  237. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  238. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  239. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  240. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  241. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  242. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  243. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  244. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  245. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  246. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  247. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  248. evalscope/models/image_edit_model.py +125 -0
  249. evalscope/models/model_apis.py +22 -0
  250. evalscope/models/openai_compatible.py +21 -0
  251. evalscope/models/text2image_model.py +2 -2
  252. evalscope/models/utils/openai.py +16 -6
  253. evalscope/perf/arguments.py +26 -4
  254. evalscope/perf/benchmark.py +76 -89
  255. evalscope/perf/http_client.py +31 -16
  256. evalscope/perf/main.py +15 -2
  257. evalscope/perf/plugin/api/base.py +9 -7
  258. evalscope/perf/plugin/api/custom_api.py +13 -58
  259. evalscope/perf/plugin/api/default_api.py +188 -79
  260. evalscope/perf/plugin/api/openai_api.py +85 -20
  261. evalscope/perf/plugin/datasets/base.py +21 -0
  262. evalscope/perf/plugin/datasets/custom.py +2 -3
  263. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  264. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  265. evalscope/perf/plugin/datasets/line_by_line.py +2 -3
  266. evalscope/perf/plugin/datasets/longalpaca.py +2 -3
  267. evalscope/perf/plugin/datasets/openqa.py +2 -4
  268. evalscope/perf/plugin/datasets/random_dataset.py +1 -3
  269. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  270. evalscope/perf/utils/benchmark_util.py +43 -27
  271. evalscope/perf/utils/db_util.py +14 -19
  272. evalscope/perf/utils/local_server.py +3 -44
  273. evalscope/perf/utils/log_utils.py +21 -6
  274. evalscope/report/__init__.py +13 -3
  275. evalscope/report/combinator.py +91 -20
  276. evalscope/report/generator.py +8 -87
  277. evalscope/report/report.py +8 -4
  278. evalscope/run.py +13 -5
  279. evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  280. evalscope/utils/argument_utils.py +1 -1
  281. evalscope/utils/chat_service.py +1 -1
  282. evalscope/utils/function_utils.py +249 -12
  283. evalscope/utils/import_utils.py +73 -1
  284. evalscope/utils/io_utils.py +132 -7
  285. evalscope/utils/json_schema.py +25 -2
  286. evalscope/utils/logger.py +69 -18
  287. evalscope/utils/model_utils.py +4 -3
  288. evalscope/utils/multi_choices.py +39 -7
  289. evalscope/utils/ner.py +377 -0
  290. evalscope/version.py +2 -2
  291. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/METADATA +252 -408
  292. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/RECORD +290 -154
  293. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
  294. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
  295. evalscope/api/mixin/dataset_mixin.py +0 -105
  296. evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
  297. tests/__init__.py +0 -1
  298. tests/aigc/__init__.py +0 -1
  299. tests/aigc/test_t2i.py +0 -142
  300. tests/benchmark/__init__.py +0 -1
  301. tests/benchmark/test_eval.py +0 -386
  302. tests/cli/__init__.py +0 -1
  303. tests/cli/test_all.py +0 -229
  304. tests/cli/test_collection.py +0 -96
  305. tests/cli/test_custom.py +0 -268
  306. tests/perf/__init__.py +0 -1
  307. tests/perf/test_perf.py +0 -176
  308. tests/rag/test_clip_benchmark.py +0 -90
  309. tests/rag/test_mteb.py +0 -213
  310. tests/rag/test_ragas.py +0 -128
  311. tests/swift/__init__.py +0 -1
  312. tests/swift/test_run_swift_eval.py +0 -146
  313. tests/swift/test_run_swift_vlm_eval.py +0 -128
  314. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  315. tests/test_run_all.py +0 -12
  316. tests/utils.py +0 -13
  317. tests/vlm/__init__.py +0 -1
  318. tests/vlm/test_vlmeval.py +0 -102
  319. /evalscope/benchmarks/{aigc → aa_lcr}/__init__.py +0 -0
  320. /evalscope/benchmarks/{aigc/i2i → ai2d}/__init__.py +0 -0
  321. /evalscope/benchmarks/{aigc/t2i → amc}/__init__.py +0 -0
  322. {tests/rag → evalscope/benchmarks/bfcl/v3}/__init__.py +0 -0
  323. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
  324. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
@@ -9,7 +9,7 @@ from evalscope.api.dataset import DatasetDict, Sample
  from evalscope.api.evaluator import TaskState
  from evalscope.api.filter import FilterEnsemble, build_filter_ensemble
  from evalscope.api.metric import AggScore, SampleScore
- from evalscope.api.mixin import LLMJudgeMixin
+ from evalscope.api.mixin import LLMJudgeMixin, SandboxMixin
  from evalscope.api.model import Model
  from evalscope.report import Report
  from evalscope.utils.logger import get_logger
@@ -21,7 +21,7 @@ if TYPE_CHECKING:
  logger = get_logger()


- class DataAdapter(LLMJudgeMixin, ABC):
+ class DataAdapter(LLMJudgeMixin, SandboxMixin, ABC):
  """
  Data Adapter for the benchmark.
  """
@@ -40,9 +40,18 @@ class DataAdapter(LLMJudgeMixin, ABC):
  self.shuffle_choices = False
  """Whether to shuffle the choices in the dataset"""

+ self.use_batch_scoring = False
+ """Whether to use batch scoring for metrics that support it, need to be enabled in the benchmark as well"""
+
  self.save_metadata = True
  """Whether to save metadata in the review result"""

+ self.add_aggregation_name = True
+ """Whether to add aggregation name in the report"""
+
+ self.add_overall_metric = True
+ """Whether to add overall metric in the report"""
+
  self.category_map = {}
  """Category map for the benchmark"""

@@ -75,6 +84,12 @@ class DataAdapter(LLMJudgeMixin, ABC):
  def calculate_metrics(self, task_state: TaskState) -> SampleScore:
  pass

+ @abstractmethod
+ def batch_calculate_metrics(self, task_states: List[TaskState],
+ sample_scores: List[SampleScore]) -> List[SampleScore]:
+ """Batch calculate metrics for a list of task states. Need to update sample_scores in place."""
+ pass
+
  @abstractmethod
  def aggregate_scores(self, sample_scores: List[SampleScore]) -> List[AggScore]:
  pass
@@ -86,6 +101,11 @@ class DataAdapter(LLMJudgeMixin, ABC):
  """
  pass

+ @abstractmethod
+ def finalize(self, *args, **kwargs) -> None:
+ """Finalize the evaluation process."""
+ pass
+
  @property
  def name(self) -> str:
  """
@@ -170,6 +190,13 @@ class DataAdapter(LLMJudgeMixin, ABC):
  """
  return self._benchmark_meta.default_subset

+ @default_subset.setter
+ def default_subset(self, value: str):
+ """
+ Set the default subset of the benchmark.
+ """
+ self._benchmark_meta.default_subset = value
+
  @property
  def few_shot_num(self) -> int:
  """
@@ -198,6 +225,13 @@ class DataAdapter(LLMJudgeMixin, ABC):
  """
  return self._benchmark_meta.train_split

+ @train_split.setter
+ def train_split(self, value: str):
+ """
+ Set the train split of the benchmark.
+ """
+ self._benchmark_meta.train_split = value
+
  @property
  def eval_split(self) -> Optional[str]:
  """
@@ -205,6 +239,13 @@ class DataAdapter(LLMJudgeMixin, ABC):
  """
  return self._benchmark_meta.eval_split

+ @eval_split.setter
+ def eval_split(self, value: str):
+ """
+ Set the eval split of the benchmark.
+ """
+ self._benchmark_meta.eval_split = value
+
  @property
  def prompt_template(self) -> Optional[str]:
  """
@@ -299,6 +340,48 @@ class DataAdapter(LLMJudgeMixin, ABC):
  """
  return self._task_config.seed

+ @property
+ def shuffle(self) -> bool:
+ """
+ Return whether to shuffle the dataset before evaluation.
+ """
+ return self._benchmark_meta.shuffle
+
+ @shuffle.setter
+ def shuffle(self, value: bool):
+ """
+ Set whether to shuffle the dataset before evaluation.
+ """
+ self._benchmark_meta.shuffle = value
+
+ @property
+ def shuffle_choices(self) -> bool:
+ """
+ Return whether to shuffle the choices in multiple-choice datasets.
+ """
+ return self._benchmark_meta.shuffle_choices
+
+ @shuffle_choices.setter
+ def shuffle_choices(self, value: bool):
+ """
+ Set whether to shuffle the choices in multiple-choice datasets.
+ """
+ self._benchmark_meta.shuffle_choices = value
+
+ @property
+ def review_timeout(self) -> Optional[float]:
+ """
+ Return the timeout for the review process.
+ """
+ return self._benchmark_meta.review_timeout
+
+ @review_timeout.setter
+ def review_timeout(self, value: float):
+ """
+ Set the timeout for the review process.
+ """
+ self._benchmark_meta.review_timeout = value
+
  @contextlib.contextmanager
  def _temporary_attribute(self, attr_name: str, new_value):
  """
@@ -73,8 +73,17 @@ class BenchmarkMeta:
  aggregation: str = 'mean'
  """ Aggregation function for the metrics. Default is 'mean'. Can be 'mean', 'pass@<k>' or a custom function name."""

+ shuffle: bool = False
+ """Whether to shuffle the dataset before evaluation."""
+
+ shuffle_choices: bool = False
+ """Whether to shuffle the choices in multiple-choice datasets."""
+
+ review_timeout: Optional[float] = None
+ """Timeout for review in seconds."""
+
  extra_params: Dict = field(default_factory=dict)
- """ Additional parameters for the benchmark."""
+ """Additional parameters for the benchmark."""

  def __post_init__(self):
  """Validate fields after initialization."""
@@ -5,9 +5,8 @@ from dataclasses import dataclass, field
  from pydantic import BaseModel, Field
  from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Union

- from evalscope.api.messages import ChatMessage, messages_pretty_str
+ from evalscope.api.messages import ChatMessage, messages_to_markdown
  from evalscope.api.tool import ToolInfo
- from evalscope.utils.multi_choices import answer_character, answer_index


  class Sample(BaseModel):
@@ -31,9 +30,6 @@ class Sample(BaseModel):
  tools: Optional[List[ToolInfo]] = None
  """List of tools available to the model during inference (optional)."""

- category: Optional[str] = None
- """Category of the sample (optional)."""
-
  subset_key: Optional[str] = None
  """Key for the subset this sample belongs to, used for generating subsets (optional)."""

@@ -54,7 +50,7 @@ class Sample(BaseModel):
  if isinstance(self.input, str):
  input_text = self.input
  else:
- input_text = messages_pretty_str(self.input)
+ input_text = messages_to_markdown(self.input, max_length=50)
  return f'Sample ID: {self.id}\nInput: {input_text}\nTarget: {self.target}'

@@ -230,6 +226,8 @@ class MemoryDataset(Dataset):
  self._shuffled = True

  def shuffle_choices(self, seed: Optional[int] = None) -> None:
+ from evalscope.utils.multi_choices import answer_character
+
  rand = random.Random(seed)
  for sample in self.samples:
  if not sample.choices:
@@ -249,6 +247,8 @@ class MemoryDataset(Dataset):
  sample.target = self._remap_target(sample.target, position_map=position_map)

  def _remap_target(self, target: Union[str, List[str]], position_map: Dict[int, str]) -> Union[str, List[str]]:
+ from evalscope.utils.multi_choices import answer_index
+
  if isinstance(target, list):
  return [position_map[answer_index(t)] for t in target]
  else:
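Note: the deferred imports above feed `MemoryDataset.shuffle_choices`, which shuffles each sample's options and then remaps the target letter through a position map. A self-contained sketch of that remapping idea (plain Python, not evalscope's `answer_character`/`answer_index` helpers):

```python
import random
import string


def shuffle_with_remap(choices, target_letter, seed=None):
    """Shuffle MCQ options and remap the target letter via a position map."""
    rand = random.Random(seed)
    order = list(range(len(choices)))
    rand.shuffle(order)
    # position_map: original choice index -> answer letter at its new position
    position_map = {orig: string.ascii_uppercase[new] for new, orig in enumerate(order)}
    shuffled = [choices[i] for i in order]
    new_target = position_map[string.ascii_uppercase.index(target_letter)]
    return shuffled, new_target


print(shuffle_with_remap(['Paris', 'Rome', 'Madrid'], 'A', seed=0))
```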
@@ -347,3 +347,24 @@ class DatasetDict:
  cur_dataset.reindex(group_size=repeats)
  dataset_dict[key] = cur_dataset
  return cls(dataset_dict)
+
+ @classmethod
+ def from_dataset_dicts(cls, dataset_dicts: List['DatasetDict']) -> 'DatasetDict':
+ """
+ Create a DatasetDict by merging multiple DatasetDicts.
+
+ Args:
+ dataset_dicts (List[DatasetDict]): List of DatasetDicts to merge.
+
+ Returns:
+ DatasetDict: A new DatasetDict containing the merged datasets.
+ """
+ merged_dict = defaultdict(list)
+ for dataset_dict in dataset_dicts:
+ for key, dataset in dataset_dict.items():
+ merged_dict[key].extend(dataset.samples)
+ # Create a MemoryDataset for each subset key
+ final_dict = {}
+ for key, samples in merged_dict.items():
+ final_dict[key] = MemoryDataset(samples, name=key)
+ return cls(final_dict)
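Note: `from_dataset_dicts` is a keyed concatenation of sample lists across several `DatasetDict`s. The same merge logic with plain dictionaries, for illustration:

```python
from collections import defaultdict


def merge_keyed_samples(dataset_dicts):
    """Concatenate sample lists that share the same subset key, preserving order."""
    merged = defaultdict(list)
    for dataset_dict in dataset_dicts:
        for key, samples in dataset_dict.items():
            merged[key].extend(samples)
    return dict(merged)


d1 = {'train': ['s1', 's2'], 'test': ['s3']}
d2 = {'test': ['s4'], 'val': ['s5']}
print(merge_keyed_samples([d1, d2]))  # {'train': ['s1', 's2'], 'test': ['s3', 's4'], 'val': ['s5']}
```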
@@ -8,7 +8,7 @@ from typing import Callable, Dict, List, Optional, Union
  from evalscope.api.dataset.utils import record_to_sample_fn
  from evalscope.constants import DEFAULT_EVALSCOPE_CACHE_DIR, HubType
  from evalscope.utils import get_logger
- from evalscope.utils.io_utils import csv_to_list, gen_hash, jsonl_to_list, safe_filename
+ from evalscope.utils.io_utils import csv_to_list, gen_hash, jsonl_to_list, safe_filename, tsv_to_list
  from .dataset import Dataset, FieldSpec, MemoryDataset, Sample
  from .utils import data_to_samples, shuffle_choices_if_requested

@@ -126,7 +126,8 @@ class RemoteDataLoader(DataLoader):
  self.limit = int(len(dataset) * self.limit)
  elif isinstance(self.limit, int) and self.limit < 0:
  raise ValueError('Limit must be a non-negative integer or a float between 0 and 1.')
- dataset = dataset.select(range(self.limit))
+ if len(dataset) > self.limit:
+ dataset = dataset.select(range(self.limit))

  # convert to list
  dataset = dataset.to_list()
@@ -167,7 +168,11 @@ class LocalDataLoader(DataLoader):
  dataset = []

  # Check for JSONL or CSV files in the specified path
- for ext, loader in [('.jsonl', jsonl_to_list), ('.csv', csv_to_list)]:
+ for ext, loader in [
+ ('.jsonl', jsonl_to_list),
+ ('.csv', csv_to_list),
+ ('.tsv', tsv_to_list),
+ ]:
  # Check if the file exists with the given extension
  if os.path.isfile(path) and path.endswith(ext):
  file_paths = [path]
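Note: `tsv_to_list` itself is not shown in this diff; assuming it mirrors the CSV helper and returns one dict per row, a plausible sketch would be:

```python
import csv
from typing import Dict, List


def tsv_to_list(file_path: str) -> List[Dict[str, str]]:
    """Read a tab-separated file into a list of row dicts, using the header row as keys."""
    with open(file_path, newline='', encoding='utf-8') as f:
        return list(csv.DictReader(f, delimiter='\t'))
```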
@@ -68,6 +68,8 @@ class CacheManager:
  # Convert to task state for further processing
  cached_state = cached_model_result.to_task_state(dataset=dataset)

+ if cached_state is None:
+ continue
  cached_task_states.append(cached_state)
  cached_sample_ids.add(cached_state.sample_id)

@@ -283,9 +285,11 @@ class ModelResult(BaseModel):
  Raises:
  ValueError: If the sample index is not found in the dataset
  """
- sample = dataset[self.index]
- if not sample:
- raise ValueError(f'Sample with index {self.index} not found in dataset')
+ try:
+ sample = dataset[self.index]
+ except IndexError:
+ logger.warning(f'Sample index {self.index} not found in dataset during cache restoration.')
+ return None

  # update metadata if exists
  if self.metadata:
@@ -299,6 +303,15 @@ class ModelResult(BaseModel):
  completed=True, # Mark as completed since it was cached
  )

+ def pretty_print(self) -> str:
+ """
+ Generate a pretty-printed string representation of the model result.
+
+ Returns:
+ A string representation of the model result
+ """
+ return self.model_dump_json(indent=2)
+


  class ReviewResult(BaseModel):
@@ -340,7 +353,7 @@ class ReviewResult(BaseModel):

  return cls(
  index=state.sample_id,
- input=state.input_text,
+ input=state.input_markdown,
  target=state.target,
  sample_score=sample_score,
  )
@@ -353,3 +366,17 @@ class ReviewResult(BaseModel):
  The sample score object
  """
  return self.sample_score
+
+ def pretty_print(self) -> str:
+ """
+ Generate a pretty-printed string representation of the review result.
+
+ Returns:
+ A string representation of the review result
+ """
+ output = [
+ f'Review Result for Sample {self.index}:',
+ f'Target: {self.target}',
+ f'Score: {self.sample_score.model_dump_json(indent=2)}',
+ ]
+ return '\n'.join(output)
@@ -54,3 +54,8 @@ class Evaluator(abc.ABC):
  def get_report(self, *args, **kwargs) -> Report:
  """Get the evaluation report."""
  pass
+
+ @abc.abstractmethod
+ def finalize(self, *args, **kwargs) -> None:
+ """Finalize the evaluation process."""
+ pass
@@ -3,7 +3,7 @@ from random import Random
  from typing import Any, Dict, List, Optional, Sequence, Union, overload

  from evalscope.api.dataset import Sample
- from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str
+ from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str, messages_to_markdown
  from evalscope.api.model import ModelOutput

@@ -188,6 +188,17 @@ class TaskState:
  else:
  return messages_pretty_str(self._input)

+ @property
+ def input_markdown(self) -> str:
+ """Get the input text as markdown.
+
+ For multi-modal content, images will be represented in markdown format.
+ """
+ if isinstance(self._input, str):
+ return self._input
+ else:
+ return messages_to_markdown(self._input)
+
  @property
  def choices(self) -> Choices:
  """Choices for the sample, if applicable."""
@@ -262,3 +273,8 @@ class TaskState:
  def target(self) -> str:
  """The scoring target for this `Sample`."""
  return self._target.text
+
+ @target.setter
+ def target(self, text: str) -> None:
+ """Set the target for review purposes."""
+ self._target = Target(text)
@@ -6,6 +6,7 @@ from .chat_message import (
  ChatMessageUser,
  dict_to_chat_message,
  messages_pretty_str,
+ messages_to_markdown,
  )
  from .content import Content, ContentAudio, ContentData, ContentImage, ContentReasoning, ContentText, ContentVideo
  from .utils import parse_content_with_reasoning
@@ -3,7 +3,7 @@ from pydantic import BaseModel, Field, JsonValue, model_validator
  from typing import Any, Dict, List, Literal, Optional, Type, Union

  from evalscope.api.tool import ToolCall, ToolCallError
- from .content import Content, ContentReasoning, ContentText
+ from .content import Content, ContentAudio, ContentImage, ContentReasoning, ContentText
  from .utils import parse_content_with_reasoning

@@ -184,7 +184,7 @@ def dict_to_chat_message(data: Dict[str, Any]) -> ChatMessage:

  def messages_pretty_str(messages: List[ChatMessage]) -> str:
- """Pretty print a list of chat messages."""
+ """Pretty print a list of chat messages. Without images or other multi-modal contents."""
  output = []
  for message in messages:
  role = message.role.capitalize()
@@ -196,3 +196,53 @@ def messages_pretty_str(messages: List[ChatMessage]) -> str:
  content += f'\nFunction: {message.function}'
  output.append(f'**{role}**: {content}')
  return '\n\n'.join(output)
+
+
+ def messages_to_markdown(messages: List[ChatMessage], max_length: Optional[int] = None) -> str:
+ """Convert a list of chat messages to markdown format.
+
+ Args:
+ messages (List[ChatMessage]): The list of chat messages to convert.
+ max_length (Optional[int]): If provided, truncates the base64 string of images to this length.
+ """
+ output = []
+ for message in messages:
+ role = message.role.capitalize()
+
+ # Start with role header
+ content_parts = [f'**{role}**: ']
+
+ # Handle content based on type
+ if isinstance(message.content, str):
+ content_parts.append(message.content)
+ else:
+ for content_item in message.content:
+ if isinstance(content_item, ContentText):
+ content_parts.append(content_item.text)
+ elif isinstance(content_item, ContentImage):
+ # Use markdown image syntax
+ image_base64 = content_item.image
+ if max_length and len(image_base64) > max_length:
+ image_base64 = image_base64[:max_length]
+ content_parts.append(f'![image]({image_base64})')
+ elif isinstance(content_item, ContentAudio):
+ audio_base64 = content_item.audio
+ if max_length and len(audio_base64) > max_length:
+ audio_base64 = audio_base64[:max_length]
+ content_parts.append(f"<audio controls src='{audio_base64}'></audio>")
+ elif isinstance(content_item, ContentReasoning):
+ content_parts.append(f'**Reasoning:** {content_item.reasoning}')
+
+ # Add tool-specific information
+ if isinstance(message, ChatMessageTool):
+ if message.error:
+ content_parts.append(f'**Error:** {message.error.message}')
+ if message.function:
+ content_parts.append(f'**Function:** {message.function}')
+ elif isinstance(message, ChatMessageAssistant) and message.tool_calls:
+ for tool_call in message.tool_calls:
+ content_parts.append(f'**Tool Call:** {tool_call.function}')
+
+ output.append('\n'.join(content_parts))
+
+ return '\n\n'.join(output)
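Note: a usage sketch for the new helper. It assumes the content models take `text=`/`image=` constructor fields (matching the attributes read above) and that `ChatMessageUser` accepts a content list; neither is confirmed by this diff.

```python
from evalscope.api.messages import ChatMessageUser, ContentImage, ContentText, messages_to_markdown

msg = ChatMessageUser(content=[
    ContentText(text='Describe this image.'),
    ContentImage(image='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB...'),
])
# As in Sample.__str__ above, max_length=50 truncates the base64 payload in the rendered markdown.
print(messages_to_markdown([msg], max_length=50))
```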
@@ -1,2 +1,2 @@
- from .metric import Metric, T2IMetric
+ from .metric import Metric, SingletonMetric, T2IMetric
  from .scorer import Aggregator, AggScore, SampleScore, Score, Value
@@ -28,7 +28,8 @@ class Metric(ABC):
  return self.apply([prediction], [reference])[0]


- class T2IMetric(Metric):
+ class SingletonMetric(Metric):
+ """Singleton base class for metrics."""
  _instance = None

  @thread_safe
@@ -48,6 +49,10 @@ class T2IMetric(Metric):
  def _init_once(self, *args, **kwargs):
  pass

+
+ class T2IMetric(SingletonMetric):
+ """Singleton base class for T2I metrics."""
+
  def apply(self, images: List[str], texts: List[str], **kwargs) -> List[Union[float, dict]]:
  pass

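Note: the hunks above only show the renamed `SingletonMetric` base and its `_instance`/`_init_once` hooks. A generic sketch of that thread-safe, initialize-once pattern (not evalscope's exact implementation):

```python
import threading


class SingletonBase:
    """One shared instance per subclass; heavy setup runs exactly once in _init_once()."""
    _instance = None
    _lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        with cls._lock:
            if cls.__dict__.get('_instance') is None:
                instance = super().__new__(cls)
                instance._init_once(*args, **kwargs)
                cls._instance = instance
        return cls._instance

    def _init_once(self, *args, **kwargs):
        pass  # subclasses load models or other heavy resources here


class DummyMetric(SingletonBase):
    def _init_once(self):
        self.model = 'loaded once'


assert DummyMetric() is DummyMetric()
```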
@@ -35,20 +35,28 @@ class Score(BaseModel):
  """Main score value."""
  if self.main_score_name and self.main_score_name in self.value:
  return self.value[self.main_score_name]
- return next(iter(self.value.values()), None)
+ elif self.value:
+ # If main_score_name is not set or not found, use the first value and update main_score_name
+ first_key = next(iter(self.value))
+ self.main_score_name = first_key
+ return self.value[first_key]
+ return None

  @main_value.setter
  def main_value(self, value: Union[int, float, bool]):
  """Set the main score value."""
  if self.main_score_name:
+ # If main_score_name is already set, use it
  self.value[self.main_score_name] = value
+ elif self.value:
+ # If no main_score_name but value dict exists, use the first key
+ first_key = next(iter(self.value))
+ self.main_score_name = first_key
+ self.value[first_key] = value
  else:
- # If no main score name is set, just update the first value
- if self.value:
- first_key = next(iter(self.value))
- self.value[first_key] = value
- else:
- self.value['default'] = value
+ # If neither main_score_name nor value dict exists, initialize both
+ self.main_score_name = 'default'
+ self.value[self.main_score_name] = value


  class SampleScore(BaseModel):
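Note: with the change above, reading `main_value` now also backfills `main_score_name`. A behavior sketch, assuming `Score` can be constructed from just a `value` dict (not confirmed by this diff):

```python
from evalscope.api.metric import Score

s = Score(value={'rouge_1': 0.42, 'rouge_l': 0.37})
print(s.main_value)       # 0.42 -- falls back to the first key ...
print(s.main_score_name)  # 'rouge_1' -- ... and records it as the main score name

empty = Score(value={})
empty.main_value = 1.0    # no keys and no name: both get initialized
print(empty.value)        # {'default': 1.0}
```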
@@ -1,2 +1,2 @@
- from .dataset_mixin import DatasetLoaderMixin
  from .llm_judge_mixin import LLMJudgeMixin
+ from .sandbox_mixin import SandboxMixin
@@ -24,6 +24,8 @@ class LLMJudgeMixin:

  self._llm_judge: Optional[LLMJudge] = None

+ super().__init__(task_config=task_config)
+
  @property
  def llm_judge(self) -> Optional[LLMJudge]:
  """Get LLM judge instance with lazy initialization."""