evalscope 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (324)
  1. evalscope/api/benchmark/__init__.py +9 -1
  2. evalscope/api/benchmark/adapters/__init__.py +4 -0
  3. evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
  4. evalscope/api/benchmark/adapters/default_data_adapter.py +75 -4
  5. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  6. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  7. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  8. evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
  9. evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
  10. evalscope/api/benchmark/benchmark.py +85 -2
  11. evalscope/api/benchmark/meta.py +10 -1
  12. evalscope/api/dataset/dataset.py +27 -6
  13. evalscope/api/dataset/loader.py +8 -3
  14. evalscope/api/evaluator/cache.py +31 -4
  15. evalscope/api/evaluator/evaluator.py +5 -0
  16. evalscope/api/evaluator/state.py +17 -1
  17. evalscope/api/messages/__init__.py +1 -0
  18. evalscope/api/messages/chat_message.py +52 -2
  19. evalscope/api/metric/__init__.py +1 -1
  20. evalscope/api/metric/metric.py +6 -1
  21. evalscope/api/metric/scorer.py +15 -7
  22. evalscope/api/mixin/__init__.py +1 -1
  23. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  24. evalscope/api/mixin/sandbox_mixin.py +182 -0
  25. evalscope/api/model/generate_config.py +10 -6
  26. evalscope/api/model/model.py +5 -2
  27. evalscope/api/tool/tool_info.py +1 -1
  28. evalscope/app/app.py +3 -0
  29. evalscope/app/ui/multi_model.py +6 -1
  30. evalscope/app/ui/single_model.py +11 -5
  31. evalscope/app/utils/data_utils.py +8 -7
  32. evalscope/app/utils/env_utils.py +12 -0
  33. evalscope/app/utils/text_utils.py +14 -12
  34. evalscope/app/utils/visualization.py +2 -2
  35. evalscope/arguments.py +8 -4
  36. evalscope/backend/opencompass/backend_manager.py +0 -2
  37. evalscope/backend/rag_eval/utils/embedding.py +9 -1
  38. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  39. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  40. evalscope/benchmarks/aime/aime24_adapter.py +5 -0
  41. evalscope/benchmarks/aime/aime25_adapter.py +136 -1
  42. evalscope/benchmarks/aime/grader.py +307 -0
  43. evalscope/benchmarks/aime/math_normalize.py +189 -0
  44. evalscope/benchmarks/amc/amc_adapter.py +51 -0
  45. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -0
  46. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  47. evalscope/benchmarks/bfcl/{bfcl_adapter.py → v3/bfcl_v3_adapter.py} +131 -19
  48. evalscope/benchmarks/bfcl/{generation.py → v3/generation.py} +9 -9
  49. evalscope/benchmarks/bfcl/v3/utils.py +23 -0
  50. evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
  51. evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
  52. evalscope/benchmarks/bfcl/v4/utils.py +410 -0
  53. evalscope/benchmarks/biomix_qa/__init__.py +0 -0
  54. evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
  55. evalscope/benchmarks/blink/__init__.py +0 -0
  56. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  57. evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
  58. evalscope/benchmarks/chartqa/__init__.py +0 -0
  59. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  60. evalscope/benchmarks/chartqa/utils.py +38 -0
  61. evalscope/benchmarks/coin_flip/__init__.py +0 -0
  62. evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
  63. evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
  64. evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
  65. evalscope/benchmarks/competition_math/competition_math_adapter.py +5 -0
  66. evalscope/benchmarks/data_collection/data_collection_adapter.py +24 -19
  67. evalscope/benchmarks/docvqa/__init__.py +0 -0
  68. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  69. evalscope/benchmarks/drivelology/__init__.py +0 -0
  70. evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
  71. evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
  72. evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
  73. evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
  74. evalscope/benchmarks/drop/drop_adapter.py +15 -44
  75. evalscope/benchmarks/drop/utils.py +97 -0
  76. evalscope/benchmarks/frames/frames_adapter.py +2 -1
  77. evalscope/benchmarks/general_arena/general_arena_adapter.py +7 -2
  78. evalscope/benchmarks/general_arena/utils.py +2 -1
  79. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
  80. evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
  81. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +25 -9
  82. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  83. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
  84. evalscope/benchmarks/halu_eval/__init__.py +0 -0
  85. evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
  86. evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
  87. evalscope/benchmarks/healthbench/__init__.py +0 -0
  88. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  89. evalscope/benchmarks/healthbench/utils.py +102 -0
  90. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  91. evalscope/benchmarks/humaneval/humaneval_adapter.py +24 -52
  92. evalscope/benchmarks/humaneval/utils.py +235 -0
  93. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  94. evalscope/benchmarks/image_edit/__init__.py +0 -0
  95. evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
  96. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  97. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  98. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  99. evalscope/benchmarks/infovqa/__init__.py +0 -0
  100. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  101. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  102. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +66 -54
  103. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  104. evalscope/benchmarks/logi_qa/__int__.py +0 -0
  105. evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
  106. evalscope/benchmarks/math_500/math_500_adapter.py +5 -1
  107. evalscope/benchmarks/math_qa/__init__.py +0 -0
  108. evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
  109. evalscope/benchmarks/math_verse/__init__.py +0 -0
  110. evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
  111. evalscope/benchmarks/math_vision/__init__.py +0 -0
  112. evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
  113. evalscope/benchmarks/math_vista/__init__.py +0 -0
  114. evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
  115. evalscope/benchmarks/med_mcqa/__init__.py +0 -0
  116. evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
  117. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  118. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
  119. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  120. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  121. evalscope/benchmarks/mm_star/__init__.py +0 -0
  122. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  123. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +1 -1
  124. evalscope/benchmarks/mmmu/__init__.py +0 -0
  125. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  126. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  127. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
  128. evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
  129. evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
  130. evalscope/benchmarks/multi_if/__init__.py +0 -0
  131. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  132. evalscope/benchmarks/multi_if/metrics.py +120 -0
  133. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  134. evalscope/benchmarks/music_trivia/__init__.py +0 -0
  135. evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
  136. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +7 -6
  137. evalscope/benchmarks/ner/__init__.py +0 -0
  138. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  139. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  140. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  141. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  142. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  143. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  144. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  145. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  146. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  147. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  148. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  149. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  150. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  151. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  152. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  153. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  154. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  155. evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
  156. evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
  157. evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
  158. evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
  159. evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
  160. evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  161. evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
  162. evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
  163. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  164. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  165. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  166. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
  167. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
  168. evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
  169. evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
  170. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  171. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  172. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  173. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  174. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  175. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  176. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  177. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  178. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  179. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  180. evalscope/benchmarks/piqa/__init__.py +0 -0
  181. evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
  182. evalscope/benchmarks/poly_math/__init__.py +0 -0
  183. evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
  184. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  185. evalscope/benchmarks/pope/__init__.py +0 -0
  186. evalscope/benchmarks/pope/pope_adapter.py +112 -0
  187. evalscope/benchmarks/process_bench/process_bench_adapter.py +1 -0
  188. evalscope/benchmarks/pumed_qa/__init__.py +0 -0
  189. evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
  190. evalscope/benchmarks/qasc/__init__.py +0 -0
  191. evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
  192. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  193. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  194. evalscope/benchmarks/sciq/__init__.py +0 -0
  195. evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
  196. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  197. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  198. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +1 -1
  199. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  200. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  201. evalscope/benchmarks/siqa/__init__.py +0 -0
  202. evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
  203. evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
  204. evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
  205. evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
  206. evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
  207. evalscope/benchmarks/tau_bench/{generation.py → tau_bench/generation.py} +1 -1
  208. evalscope/benchmarks/tau_bench/{tau_bench_adapter.py → tau_bench/tau_bench_adapter.py} +29 -29
  209. evalscope/benchmarks/text2image/__init__.py +0 -0
  210. evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
  211. evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
  212. evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
  213. evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
  214. evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
  215. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +3 -3
  216. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
  217. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  218. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  219. evalscope/benchmarks/wmt/__init__.py +0 -0
  220. evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
  221. evalscope/benchmarks/zerobench/__init__.py +0 -0
  222. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  223. evalscope/cli/start_app.py +7 -1
  224. evalscope/cli/start_perf.py +7 -1
  225. evalscope/config.py +103 -18
  226. evalscope/constants.py +18 -0
  227. evalscope/evaluator/evaluator.py +138 -82
  228. evalscope/metrics/bert_score/__init__.py +0 -0
  229. evalscope/metrics/bert_score/scorer.py +338 -0
  230. evalscope/metrics/bert_score/utils.py +697 -0
  231. evalscope/metrics/llm_judge.py +19 -7
  232. evalscope/metrics/math_parser.py +14 -0
  233. evalscope/metrics/metric.py +317 -13
  234. evalscope/metrics/metrics.py +37 -0
  235. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  236. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  237. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  238. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  239. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  240. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  241. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  242. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  243. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  244. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  245. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  246. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  247. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  248. evalscope/models/image_edit_model.py +125 -0
  249. evalscope/models/model_apis.py +22 -0
  250. evalscope/models/openai_compatible.py +21 -0
  251. evalscope/models/text2image_model.py +2 -2
  252. evalscope/models/utils/openai.py +16 -6
  253. evalscope/perf/arguments.py +26 -4
  254. evalscope/perf/benchmark.py +76 -89
  255. evalscope/perf/http_client.py +31 -16
  256. evalscope/perf/main.py +15 -2
  257. evalscope/perf/plugin/api/base.py +9 -7
  258. evalscope/perf/plugin/api/custom_api.py +13 -58
  259. evalscope/perf/plugin/api/default_api.py +188 -79
  260. evalscope/perf/plugin/api/openai_api.py +85 -20
  261. evalscope/perf/plugin/datasets/base.py +21 -0
  262. evalscope/perf/plugin/datasets/custom.py +2 -3
  263. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  264. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  265. evalscope/perf/plugin/datasets/line_by_line.py +2 -3
  266. evalscope/perf/plugin/datasets/longalpaca.py +2 -3
  267. evalscope/perf/plugin/datasets/openqa.py +2 -4
  268. evalscope/perf/plugin/datasets/random_dataset.py +1 -3
  269. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  270. evalscope/perf/utils/benchmark_util.py +43 -27
  271. evalscope/perf/utils/db_util.py +14 -19
  272. evalscope/perf/utils/local_server.py +3 -44
  273. evalscope/perf/utils/log_utils.py +21 -6
  274. evalscope/report/__init__.py +13 -3
  275. evalscope/report/combinator.py +91 -20
  276. evalscope/report/generator.py +8 -87
  277. evalscope/report/report.py +8 -4
  278. evalscope/run.py +13 -5
  279. evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  280. evalscope/utils/argument_utils.py +1 -1
  281. evalscope/utils/chat_service.py +1 -1
  282. evalscope/utils/function_utils.py +249 -12
  283. evalscope/utils/import_utils.py +73 -1
  284. evalscope/utils/io_utils.py +132 -7
  285. evalscope/utils/json_schema.py +25 -2
  286. evalscope/utils/logger.py +69 -18
  287. evalscope/utils/model_utils.py +4 -3
  288. evalscope/utils/multi_choices.py +39 -7
  289. evalscope/utils/ner.py +377 -0
  290. evalscope/version.py +2 -2
  291. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/METADATA +252 -408
  292. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/RECORD +290 -154
  293. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
  294. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
  295. evalscope/api/mixin/dataset_mixin.py +0 -105
  296. evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
  297. tests/__init__.py +0 -1
  298. tests/aigc/__init__.py +0 -1
  299. tests/aigc/test_t2i.py +0 -142
  300. tests/benchmark/__init__.py +0 -1
  301. tests/benchmark/test_eval.py +0 -386
  302. tests/cli/__init__.py +0 -1
  303. tests/cli/test_all.py +0 -229
  304. tests/cli/test_collection.py +0 -96
  305. tests/cli/test_custom.py +0 -268
  306. tests/perf/__init__.py +0 -1
  307. tests/perf/test_perf.py +0 -176
  308. tests/rag/test_clip_benchmark.py +0 -90
  309. tests/rag/test_mteb.py +0 -213
  310. tests/rag/test_ragas.py +0 -128
  311. tests/swift/__init__.py +0 -1
  312. tests/swift/test_run_swift_eval.py +0 -146
  313. tests/swift/test_run_swift_vlm_eval.py +0 -128
  314. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  315. tests/test_run_all.py +0 -12
  316. tests/utils.py +0 -13
  317. tests/vlm/__init__.py +0 -1
  318. tests/vlm/test_vlmeval.py +0 -102
  319. /evalscope/benchmarks/{aigc → aa_lcr}/__init__.py +0 -0
  320. /evalscope/benchmarks/{aigc/i2i → ai2d}/__init__.py +0 -0
  321. /evalscope/benchmarks/{aigc/t2i → amc}/__init__.py +0 -0
  322. {tests/rag → evalscope/benchmarks/bfcl/v3}/__init__.py +0 -0
  323. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
  324. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
evalscope/benchmarks/healthbench/healthbench_adapter.py (new file)
@@ -0,0 +1,282 @@
+ import copy
+ import os
+ from typing import Any, Dict
+
+ from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.dataset.dataset import DatasetDict
+ from evalscope.api.dataset.loader import LocalDataLoader
+ from evalscope.api.messages.chat_message import ChatMessageUser, dict_to_chat_message
+ from evalscope.api.metric import Score
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+ from evalscope.utils.function_utils import retry_context
+ from evalscope.utils.logger import get_logger
+
+ logger = get_logger()
+
+ GRADER_TEMPLATE = """
+ Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item.
+
+ # Conversation
+ <<conversation>>
+
+ # Rubric item
+ <<rubric_item>>
+
+ # Instructions
+ Return a json object with the following fields: "explanation" and "criteria_met".
+ - The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item.
+ - The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true is all of the criteria are met.
+ - One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria.
+
+ # Example 1
+ For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this:
+
+ ```json
+ {
+   "explanation": "The assistant did not tell the user to go to the hospital if unconscious.",
+   "criteria_met": false
+ }
+ ```
+
+ # Example 2
+ Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not.
+ For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this:
+
+ ```json
+ {
+   "explanation": "The assistant actually gave the user accurate information, so the criteria was not met.",
+   "criteria_met": false
+ }
+ ```
+
+ As another example, if the critera says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this:
+
+ ```json
+ {
+   "explanation": "The response is concise, so the criteria was not met.",
+   "criteria_met": false
+ }
+ ```
+
+ In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true.
+
+ # Final instruction
+ Return just the json object in markdown format. Do not include any other text in the response.
+ """.strip() # noqa: E501
+
+ # Available subsets in the HealthBench dataset
+ # Each subset focuses on different aspects of health-related conversations
+ SUBSET_LIST = [
+     'emergency_referrals', # Situations requiring immediate medical attention
+     'communication', # Communication skills and patient interaction
+     'complex_responses', # Complex medical scenarios requiring detailed responses
+     'hedging', # Appropriate uncertainty and hedging in medical advice
+     'health_data_tasks', # Tasks involving health data analysis
+     'global_health', # Global health perspectives and cultural considerations
+     'context_seeking', # Ability to seek additional context when needed
+ ]
+
+ # Available versions of the dataset
+ VERSION = [
+     'Consensus',
+     'Hard',
+     'All',
+ ]
+
+ # Mapping of version names to their corresponding data files
+ VERSION_FILE = {
+     'All': '2025-05-07-06-14-12_oss_eval.jsonl', # Complete dataset
+     'Consensus': 'consensus_2025-05-09-20-00-46.jsonl', # Consensus subset
+     'Hard': 'hard_2025-05-08-21-00-10.jsonl', # Hard examples subset
+ }
+
+
+ @register_benchmark(
+     BenchmarkMeta(
+         name='health_bench',
+         pretty_name='HealthBench',
+         tags=[Tags.KNOWLEDGE, Tags.QA, Tags.MEDICAL],
+         description=
+         'HealthBench: a new benchmark designed to better measure capabilities of AI systems for health. Built in partnership with 262 physicians who have practiced in 60 countries, HealthBench includes 5,000 realistic health conversations, each with a custom physician-created rubric to grade model responses.', # noqa: E501
+         dataset_id='openai-mirror/healthbench',
+         subset_list=SUBSET_LIST,
+         metric_list=[
+             'communication_quality',
+             'instruction_following',
+             'accuracy',
+             'context_awareness',
+             'completeness',
+         ],
+         aggregation='clipped_mean',
+         few_shot_num=0,
+         train_split=None,
+         eval_split='test',
+         prompt_template='Answer the question:\n\n{question}',
+         extra_params={
+             'version': f'# File version, choose from {VERSION}, default to {VERSION[0]}',
+         }
+     )
+ )
+ class HealthBenchAdapter(DefaultDataAdapter):
+     """
+     Adapter for the HealthBench dataset that handles loading health conversation data
+     and evaluating AI responses using physician-created rubrics.
+
+     This adapter supports multiple dataset versions and uses LLM judges to evaluate
+     responses against detailed medical criteria.
+     """
+
+     def __init__(self, *args, **kwargs):
+         """
+         Initialize the HealthBench adapter.
+
+         Sets up default configuration including:
+         - LLM judge evaluation
+         - Dataset version selection
+         - Subset reformatting
+         """
+         super().__init__(*args, **kwargs)
+
+         self._use_llm_judge = True # Use LLM as a judge by default
+         self.reformat_subset = True
+         self.add_aggregation_name = False
+         # Get version from extra parameters, default to first version if not specified
+         self.version = self.extra_params.get('version', VERSION[0])
+         # Validate version parameter
+         if self.version not in VERSION:
+             logger.warning(f'Invalid version {self.version}, choose from {VERSION}, default to {VERSION[0]}')
+             self.version = VERSION[0]
+         # Map version to corresponding data file
+         self.version_file = VERSION_FILE[self.version]
+
+     def load(self):
+         """
+         Load the HealthBench dataset from local or remote source.
+
+         Returns:
+             tuple: (test_dataset, None) where test_dataset is a DatasetDict
+                 containing the loaded data split by subsets
+         """
+         # Try to load dataset from local disk
+         dataset_name_or_path = self.dataset_id
+         if os.path.exists(dataset_name_or_path):
+             logger.info(f'Loading dataset from {dataset_name_or_path}')
+             dataset_path = dataset_name_or_path
+         else:
+             from modelscope import dataset_snapshot_download
+
+             # Load dataset from remote
+             logger.info(f'Loading dataset from modelscope: > dataset_name: {dataset_name_or_path}')
+             # download dataset snapshot
+             dataset_path = dataset_snapshot_download(dataset_name_or_path, allow_file_pattern=self.version_file)
+
+         # Create local data loader with specified parameters
+         dataset = LocalDataLoader(
+             data_id_or_path=dataset_path,
+             split=self.eval_split,
+             sample_fields=self.record_to_sample,
+             subset=os.path.splitext(self.version_file)[0], # NOTE: using hardcoded test subset
+             shuffle=self.shuffle,
+         ).load()
+
+         # Convert to DatasetDict and apply subset filtering and limiting
+         test_dataset = DatasetDict.from_dataset(
+             dataset=dataset, subset_list=self.subset_list, limit=self.limit, repeats=self.repeats
+         )
+
+         return test_dataset, None
+
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         """
+         Convert a raw data record to a Sample object.
+
+         Args:
+             record: Raw data record containing prompt, tags, and metadata
+
+         Returns:
+             Sample: Formatted sample with input messages, theme, and metadata
+         """
+         # Convert prompt messages to chat message objects
+         input_messages = [dict_to_chat_message(message) for message in record['prompt']]
+         # Extract theme from example tags, default to 'Unknown' if no tags
+         tags = record['example_tags']
+         theme = tags[0].split(':')[1].strip() if len(tags) > 0 else 'Unknown'
+         return Sample(input=input_messages, target='', subset_key=theme, metadata=record)
+
+     def llm_match_score(self, original_prediction, filtered_prediction, reference, task_state) -> Score:
+         """
+         Evaluate AI response using LLM judge against physician-created rubrics.
+
+         Args:
+             original_prediction: The AI model's original response
+             filtered_prediction: Filtered/processed version of the response
+             reference: Reference answer (not used in this evaluation)
+             task_state: Contains metadata including rubric items
+
+         Returns:
+             Score: Contains overall score, rubric tag scores, and explanations
+         """
+         from .utils import (
+             RubricItem,
+             calculate_rubric_tag_scores,
+             calculate_score,
+             construct_readable_explanation,
+             parse_json_to_dict,
+         )
+
+         # Initialize the score object with prediction details
+         score = Score(
+             extracted_prediction=filtered_prediction,
+             prediction=original_prediction,
+         )
+
+         # Extract rubric items and conversation from task metadata
+         example = copy.deepcopy(task_state.metadata)
+         rubric_items = [RubricItem.from_dict(d) for d in example['rubrics']]
+         # Construct full conversation including the AI response
+         convo_with_response = example['prompt'] + [dict(content=original_prediction, role='assistant')]
+         # Format conversation as readable string
+         convo_str = '\n\n'.join([f"{m['role']}: {m['content']}" for m in convo_with_response])
+
+         # Evaluate response against each rubric item using LLM judge
+         grading_response_list = []
+         for rubric_item in rubric_items:
+             # Create judge prompt by substituting conversation and rubric item
+             grader_prompt = GRADER_TEMPLATE.replace('<<conversation>>',
+                                                     convo_str).replace('<<rubric_item>>', str(rubric_item))
+             messages = [ChatMessageUser(content=grader_prompt)]
+             # Retry logic for robust evaluation
+             with retry_context(retries=3, sleep_interval=1):
+                 grading_response = self.llm_judge.judge(messages=messages)
+                 grading_response_dict = parse_json_to_dict(grading_response)
+                 # Validate response format and extract boolean criteria_met field
+                 if 'criteria_met' in grading_response_dict and isinstance(grading_response_dict['criteria_met'], bool):
+                     grading_response_list.append(grading_response_dict)
+                 else:
+                     logger.warning('Grading failed due to bad JSON output, retrying...')
+                     raise ValueError('Grading failed due to bad JSON output')
+
+         # Calculate final scores and explanations
+         overall_score = calculate_score(rubric_items, grading_response_list) # Overall weighted score
+         rubric_tag_scores, axis_grades = calculate_rubric_tag_scores(
+             rubric_items, grading_response_list
+         ) # Scores by category
+         readable_explanation = construct_readable_explanation(
+             rubric_items, grading_response_list
+         ) # Human-readable results
+
+         # Set score values and metadata
+         score.value = {
+             'overall_score': overall_score,
+             **axis_grades, # Include axis scores at top level
+         }
+         score.main_score_name = 'overall_score'
+         score.metadata = {
+             'readable_explanation': readable_explanation,
+             'rubric_tag_scores': rubric_tag_scores,
+         }
+         # Store explanation in sample target for reference
+         task_state.target = '**Score Explanation**\n\n' + readable_explanation
+         return score
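For orientation, the snippet below sketches how the new `health_bench` benchmark might be invoked once this version is installed. It is not part of the diff: the `TaskConfig`/`run_task` entry points and the `dataset_args`/`judge_model_args` shapes follow evalscope's usual conventions, the model names are placeholders, and only the benchmark name `health_bench` and the `version` extra parameter come from the adapter above.

```python
# Hypothetical usage sketch -- not part of the diff. Model ids are placeholders;
# 'health_bench' and extra_params['version'] come from the adapter registered above.
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='qwen2.5-7b-instruct',            # model under evaluation (placeholder)
    datasets=['health_bench'],              # name used in @register_benchmark
    dataset_args={
        'health_bench': {
            'extra_params': {'version': 'Hard'},  # 'Consensus', 'Hard', or 'All'
        },
    },
    judge_model_args={'model_id': 'qwen2.5-72b-instruct'},  # rubric grader (placeholder)
)

run_task(task_cfg=task_cfg)
```

Each sample is graded rubric item by rubric item by the judge model, and the per-axis scores (accuracy, completeness, and so on) are surfaced alongside `overall_score`.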
evalscope/benchmarks/healthbench/utils.py (new file)
@@ -0,0 +1,102 @@
+ import json
+ import re
+ from collections import defaultdict
+
+ from evalscope.utils import get_logger
+
+ logger = get_logger()
+
+
+ def parse_json_to_dict(json_string: str) -> dict:
+     # Remove markdown-style ```json``` markers if present
+     json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip())
+
+     try:
+         return json.loads(json_cleaned)
+     except json.JSONDecodeError as e:
+         logger.warning(f'JSON decoding failed: {e}')
+         return {}
+
+
+ class RubricItem:
+
+     def __init__(self, criterion: str, points: float, tags: list[str]):
+         self.criterion = criterion
+         self.points = points
+         self.tags = tags
+
+     def __str__(self):
+         return f'[{self.points}] {self.criterion}'
+
+     def to_dict(self):
+         return {
+             'criterion': self.criterion,
+             'points': self.points,
+             'tags': self.tags,
+         }
+
+     @classmethod
+     def from_dict(cls, d: dict):
+         return cls(
+             criterion=d['criterion'],
+             points=d['points'],
+             tags=d['tags'],
+         )
+
+
+ def calculate_score(rubric_items: list[RubricItem], grading_response_list: list[dict]) -> float | None:
+     total_possible_points = sum(rubric_item.points for rubric_item in rubric_items if rubric_item.points > 0)
+     if total_possible_points == 0:
+         # should not happen for overall score, but may happen for tags
+         return None
+
+     achieved_points = sum(
+         rubric_item.points
+         for rubric_item, grading_response in zip(rubric_items, grading_response_list, strict=True)
+         if grading_response['criteria_met']
+     )
+     overall_score = achieved_points / total_possible_points
+     return overall_score
+
+
+ def calculate_rubric_tag_scores(rubric_items: list[RubricItem], grading_response_list: list[dict]) -> dict[str, float]:
+     rubric_tag_items_grades = defaultdict(list)
+     axis_grades = defaultdict(list)
+     for rubric_item, grading_response in zip(rubric_items, grading_response_list):
+         curr_item_tags = set() # Ensure no duplicates in a rubric item.
+         for tag in rubric_item.tags:
+             rubric_tag_items_grades[tag].append((rubric_item, grading_response))
+             assert tag not in curr_item_tags
+             curr_item_tags.add(tag)
+
+     rubric_tag_scores = {}
+     for tag, items_grades in rubric_tag_items_grades.items():
+         items, grades = zip(*items_grades)
+         score = calculate_score(items, grades)
+         if score is not None: # implies at least one positive criterion
+             rubric_tag_scores[tag] = score
+             if tag.startswith('axis:'):
+                 axis_grades[tag.split(':')[1]] = score
+
+     return rubric_tag_scores, axis_grades
+
+
+ def construct_readable_explanation(rubric_items: list[RubricItem], grading_response_list: list[dict]) -> str:
+     rubric_items_with_grades = []
+     readable_explanation_list = []
+     for rubric_item, grading_response in zip(rubric_items, grading_response_list):
+         explanation = grading_response.get('explanation', 'No explanation provided')
+         criteria_met = grading_response['criteria_met']
+         readable_explanation = (f'[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}')
+         readable_explanation_list.append(readable_explanation)
+         rubric_items_with_grades.append({
+             **rubric_item.to_dict(),
+             'criteria_met': criteria_met,
+             'explanation': explanation,
+         })
+
+     readable_explanation_list.sort(key=lambda x: x.startswith('[False]'), reverse=True)
+     readable_explanation_str = '\n\n'.join(readable_explanation_list)
+     readable_explanation_str = f'\n\n{readable_explanation_str}'
+
+     return readable_explanation_str
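The scoring helpers above count only positively-pointed criteria in the denominator, so a met negative-point criterion pulls a sample's score down (possibly below zero, which is why the adapter aggregates with `clipped_mean`). A minimal sketch of that behavior, assuming the module path shown in the file list:

```python
# Minimal check of calculate_score's handling of negative rubric points.
# Assumes the module path evalscope/benchmarks/healthbench/utils.py from the file list above.
from evalscope.benchmarks.healthbench.utils import RubricItem, calculate_score

rubric_items = [
    RubricItem(criterion='Advises contacting a clinician', points=5, tags=['axis:accuracy']),
    RubricItem(criterion='Is overly verbose', points=-2, tags=['axis:communication_quality']),
]
# The judge decided both criteria were met, including the undesirable one.
grading = [{'criteria_met': True}, {'criteria_met': True}]

# Denominator counts only positive points (5); met items contribute 5 + (-2) = 3.
print(calculate_score(rubric_items, grading))  # 0.6
```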
evalscope/benchmarks/hle/hle_adapter.py
@@ -57,8 +57,9 @@ Your judgment must focus only on if there are meaningful differences between [co
          'humanities/social science (9%), computer science/artificial intelligence (10%), '
          'engineering (4%), chemistry (7%), and other (9%). Around 14% of the questions '
          'require the ability to understand both text and images, i.e., multi-modality. '
-         '24% of the questions are multiple-choice; the rest are short-answer, exact-match questions. '
-         'To evaluate the performance of model without multi-modality capabilities, please set the extra_params["include_multi_modal"] to False.', # noqa: E501
+         '24% of the questions are multiple-choice; the rest are short-answer, exact-match questions. \n'
+         '**To evaluate the performance of model without multi-modality capabilities, '
+         'please set the `extra_params["include_multi_modal"]` to `False`.**', # noqa: E501
          dataset_id='cais/hle',
          subset_list=SUBSET_LIST,
          metric_list=['acc'],
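The HLE description change above points text-only users at `extra_params["include_multi_modal"]`. A hedged sketch of the corresponding `dataset_args` fragment follows; the benchmark key `hle` is inferred from the adapter's path, and only the parameter name and value come from the diff.

```python
# Assumed dataset_args fragment for evaluating a text-only model on HLE;
# the 'hle' key is inferred from evalscope/benchmarks/hle/hle_adapter.py.
dataset_args = {
    'hle': {
        'extra_params': {'include_multi_modal': False},
    },
}
```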
evalscope/benchmarks/humaneval/humaneval_adapter.py
@@ -14,9 +14,6 @@ from evalscope.utils.logger import get_logger

  logger = get_logger()

- # Example:
- # {"task_id": "HumanEval/0", "prompt": "from typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\n given threshold.\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n False\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n True\n \"\"\"\n", "entry_point": "has_close_elements", "canonical_solution": " for idx, elem in enumerate(numbers):\n for idx2, elem2 in enumerate(numbers):\n if idx != idx2:\n distance = abs(elem - elem2)\n if distance < threshold:\n return True\n\n return False\n", "test": "\n\nMETADATA = {\n 'author': 'jt',\n 'dataset': 'test'\n}\n\n\ndef check(candidate):\n assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.3) == True\n assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.05) == False\n assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.95) == True\n assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.8) == False\n assert candidate([1.0, 2.0, 3.0, 4.0, 5.0, 2.0], 0.1) == True\n assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 1.0) == True\n assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 0.5) == False\n\n"} # noqa
-

  @register_benchmark(
      BenchmarkMeta(
@@ -24,17 +21,15 @@ logger = get_logger()
          pretty_name='HumanEval',
          tags=[Tags.CODING],
          description=
-         'HumanEval is a benchmark for evaluating the ability of code generation models to write Python functions based on given specifications. It consists of programming tasks with a defined input-output behavior.',
+         'HumanEval is a benchmark for evaluating the ability of code generation models to write Python functions based on given specifications. It consists of programming tasks with a defined input-output behavior. '
+         '**By default the code is executed in local environment. We recommend using sandbox execution to safely run and evaluate the generated code, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/sandbox.html) for more details.**', # noqa: E501
          dataset_id='opencompass/humaneval',
          subset_list=['openai_humaneval'],
-         metric_list=['Pass@1'],
+         aggregation='mean_and_pass_at_k',
          eval_split='test',
          prompt_template=
          'Read the following function signature and docstring, and fully implement the function described. Your response should only contain the code for this function.\n{question}',
-         extra_params={
-             'num_workers': 4,
-             'timeout': 4
-         },
+         review_timeout=4,
      )
  )
  class HumanevalAdapter(DefaultDataAdapter):
@@ -42,27 +37,6 @@ class HumanevalAdapter(DefaultDataAdapter):
      HumanEval adapter using the new data processing framework.
      """

-     def __init__(self, **kwargs):
-         try:
-             from human_eval.data import stream_jsonl, write_jsonl
-             from human_eval.evaluation import check_correctness
-         except ImportError:
-             raise ImportError(
-                 'Please install human_eval:'
-                 'https://github.com/openai/human-eval/tree/master#installation , '
-                 'Note that you need to enable the execution code in the human_eval/execution.py first.'
-             )
-         super().__init__(**kwargs)
-
-         extra_params = kwargs.get('extra_params', {})
-         self.k = [1]
-         self.num_workers = extra_params.get('num_workers', 4)
-         self.timeout = extra_params.get('timeout', 4)
-
-         self.read_problems_func = stream_jsonl
-         self.write_jsonl_func = write_jsonl
-         self.eval_func = check_correctness
-
      def record_to_sample(self, record: Dict[str, Any]) -> Sample:
          """Convert a data record to a Sample object."""
          query = record['prompt']
@@ -94,31 +68,29 @@ class HumanevalAdapter(DefaultDataAdapter):
      def match_score(
          self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
      ) -> Score:
+
          score = Score(
              extracted_prediction=filtered_prediction,
              prediction=original_prediction,
          )
-
-         # Execute the code and check correctness
-         res = self.eval_func(task_state.metadata, filtered_prediction, self.timeout)
-         passed = res['passed']
-
-         score.value = {'pass': passed}
-         score.explanation = res.get('result', 'Code execution completed')
-         score.metadata = {'task_id': task_state.metadata['task_id'], 'timeout': self.timeout, 'execution_result': res}
-         score.main_score_name = 'pass'
+         problem = task_state.metadata
+         completion = filtered_prediction
+
+         if not self.use_sandbox:
+             from .utils import check_correctness
+
+             # Execute the code and check correctness
+             res = check_correctness(problem=problem, completion=completion, timeout=self.review_timeout)
+             passed = res['passed']
+         else:
+             check_program = (
+                 problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
+             )
+             res = self.execute_code_in_sandbox(code=check_program, timeout=self.review_timeout, language='python')
+             passed = res.get('status') == 'success'
+         # Set score values
+         score.value = {'acc': passed}
+         score.metadata = {'task_id': problem['task_id'], 'timeout': self.review_timeout, 'execution_result': res}
+         score.main_score_name = 'acc'

          return score
-
-     def aggregate_scores(self, sample_scores):
-         from evalscope.metrics.metric import PassAtK
-
-         # caculate pass@k here
-         agg_list = []
-         for metric in self.metric_list:
-             if metric.lower().startswith('pass@'):
-                 k = int(metric.split('@')[1])
-                 # Get the scores for this metric
-                 agg = PassAtK(k)
-                 agg_list.extend(agg(sample_scores))
-         return agg_list
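To make the new sandbox branch concrete, the sketch below assembles the same `check_program` string that `match_score` passes to `execute_code_in_sandbox` (or checks locally via `check_correctness`). The toy problem record is invented for illustration; real records from `opencompass/humaneval` carry the same `prompt`, `test`, and `entry_point` fields used above.

```python
# Illustrative only: a made-up problem record with the fields match_score uses.
problem = {
    'task_id': 'Toy/0',
    'prompt': 'def add(a, b):\n    """Return the sum of a and b."""\n',
    'test': 'def check(candidate):\n    assert candidate(1, 2) == 3\n',
    'entry_point': 'add',
}
completion = '    return a + b\n'

# Same assembly as the sandbox branch in match_score above.
check_program = (
    problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
)

exec(check_program)  # would raise AssertionError if the completion were wrong
print('passed')      # the adapter records this outcome under the 'acc' key
```

In the sandboxed path the adapter only inspects `res.get('status') == 'success'`, so any exception raised by the assembled program marks the sample as failed.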