evalscope 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (324)
  1. evalscope/api/benchmark/__init__.py +9 -1
  2. evalscope/api/benchmark/adapters/__init__.py +4 -0
  3. evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
  4. evalscope/api/benchmark/adapters/default_data_adapter.py +75 -4
  5. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  6. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  7. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  8. evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
  9. evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
  10. evalscope/api/benchmark/benchmark.py +85 -2
  11. evalscope/api/benchmark/meta.py +10 -1
  12. evalscope/api/dataset/dataset.py +27 -6
  13. evalscope/api/dataset/loader.py +8 -3
  14. evalscope/api/evaluator/cache.py +31 -4
  15. evalscope/api/evaluator/evaluator.py +5 -0
  16. evalscope/api/evaluator/state.py +17 -1
  17. evalscope/api/messages/__init__.py +1 -0
  18. evalscope/api/messages/chat_message.py +52 -2
  19. evalscope/api/metric/__init__.py +1 -1
  20. evalscope/api/metric/metric.py +6 -1
  21. evalscope/api/metric/scorer.py +15 -7
  22. evalscope/api/mixin/__init__.py +1 -1
  23. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  24. evalscope/api/mixin/sandbox_mixin.py +182 -0
  25. evalscope/api/model/generate_config.py +10 -6
  26. evalscope/api/model/model.py +5 -2
  27. evalscope/api/tool/tool_info.py +1 -1
  28. evalscope/app/app.py +3 -0
  29. evalscope/app/ui/multi_model.py +6 -1
  30. evalscope/app/ui/single_model.py +11 -5
  31. evalscope/app/utils/data_utils.py +8 -7
  32. evalscope/app/utils/env_utils.py +12 -0
  33. evalscope/app/utils/text_utils.py +14 -12
  34. evalscope/app/utils/visualization.py +2 -2
  35. evalscope/arguments.py +8 -4
  36. evalscope/backend/opencompass/backend_manager.py +0 -2
  37. evalscope/backend/rag_eval/utils/embedding.py +9 -1
  38. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  39. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  40. evalscope/benchmarks/aime/aime24_adapter.py +5 -0
  41. evalscope/benchmarks/aime/aime25_adapter.py +136 -1
  42. evalscope/benchmarks/aime/grader.py +307 -0
  43. evalscope/benchmarks/aime/math_normalize.py +189 -0
  44. evalscope/benchmarks/amc/amc_adapter.py +51 -0
  45. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -0
  46. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  47. evalscope/benchmarks/bfcl/{bfcl_adapter.py → v3/bfcl_v3_adapter.py} +131 -19
  48. evalscope/benchmarks/bfcl/{generation.py → v3/generation.py} +9 -9
  49. evalscope/benchmarks/bfcl/v3/utils.py +23 -0
  50. evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
  51. evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
  52. evalscope/benchmarks/bfcl/v4/utils.py +410 -0
  53. evalscope/benchmarks/biomix_qa/__init__.py +0 -0
  54. evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
  55. evalscope/benchmarks/blink/__init__.py +0 -0
  56. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  57. evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
  58. evalscope/benchmarks/chartqa/__init__.py +0 -0
  59. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  60. evalscope/benchmarks/chartqa/utils.py +38 -0
  61. evalscope/benchmarks/coin_flip/__init__.py +0 -0
  62. evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
  63. evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
  64. evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
  65. evalscope/benchmarks/competition_math/competition_math_adapter.py +5 -0
  66. evalscope/benchmarks/data_collection/data_collection_adapter.py +24 -19
  67. evalscope/benchmarks/docvqa/__init__.py +0 -0
  68. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  69. evalscope/benchmarks/drivelology/__init__.py +0 -0
  70. evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
  71. evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
  72. evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
  73. evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
  74. evalscope/benchmarks/drop/drop_adapter.py +15 -44
  75. evalscope/benchmarks/drop/utils.py +97 -0
  76. evalscope/benchmarks/frames/frames_adapter.py +2 -1
  77. evalscope/benchmarks/general_arena/general_arena_adapter.py +7 -2
  78. evalscope/benchmarks/general_arena/utils.py +2 -1
  79. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
  80. evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
  81. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +25 -9
  82. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  83. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
  84. evalscope/benchmarks/halu_eval/__init__.py +0 -0
  85. evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
  86. evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
  87. evalscope/benchmarks/healthbench/__init__.py +0 -0
  88. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  89. evalscope/benchmarks/healthbench/utils.py +102 -0
  90. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  91. evalscope/benchmarks/humaneval/humaneval_adapter.py +24 -52
  92. evalscope/benchmarks/humaneval/utils.py +235 -0
  93. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  94. evalscope/benchmarks/image_edit/__init__.py +0 -0
  95. evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
  96. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  97. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  98. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  99. evalscope/benchmarks/infovqa/__init__.py +0 -0
  100. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  101. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  102. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +66 -54
  103. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  104. evalscope/benchmarks/logi_qa/__int__.py +0 -0
  105. evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
  106. evalscope/benchmarks/math_500/math_500_adapter.py +5 -1
  107. evalscope/benchmarks/math_qa/__init__.py +0 -0
  108. evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
  109. evalscope/benchmarks/math_verse/__init__.py +0 -0
  110. evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
  111. evalscope/benchmarks/math_vision/__init__.py +0 -0
  112. evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
  113. evalscope/benchmarks/math_vista/__init__.py +0 -0
  114. evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
  115. evalscope/benchmarks/med_mcqa/__init__.py +0 -0
  116. evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
  117. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  118. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
  119. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  120. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  121. evalscope/benchmarks/mm_star/__init__.py +0 -0
  122. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  123. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +1 -1
  124. evalscope/benchmarks/mmmu/__init__.py +0 -0
  125. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  126. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  127. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
  128. evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
  129. evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
  130. evalscope/benchmarks/multi_if/__init__.py +0 -0
  131. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  132. evalscope/benchmarks/multi_if/metrics.py +120 -0
  133. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  134. evalscope/benchmarks/music_trivia/__init__.py +0 -0
  135. evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
  136. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +7 -6
  137. evalscope/benchmarks/ner/__init__.py +0 -0
  138. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  139. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  140. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  141. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  142. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  143. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  144. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  145. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  146. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  147. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  148. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  149. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  150. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  151. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  152. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  153. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  154. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  155. evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
  156. evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
  157. evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
  158. evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
  159. evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
  160. evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  161. evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
  162. evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
  163. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  164. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  165. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  166. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
  167. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
  168. evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
  169. evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
  170. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  171. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  172. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  173. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  174. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  175. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  176. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  177. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  178. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  179. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  180. evalscope/benchmarks/piqa/__init__.py +0 -0
  181. evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
  182. evalscope/benchmarks/poly_math/__init__.py +0 -0
  183. evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
  184. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  185. evalscope/benchmarks/pope/__init__.py +0 -0
  186. evalscope/benchmarks/pope/pope_adapter.py +112 -0
  187. evalscope/benchmarks/process_bench/process_bench_adapter.py +1 -0
  188. evalscope/benchmarks/pumed_qa/__init__.py +0 -0
  189. evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
  190. evalscope/benchmarks/qasc/__init__.py +0 -0
  191. evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
  192. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  193. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  194. evalscope/benchmarks/sciq/__init__.py +0 -0
  195. evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
  196. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  197. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  198. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +1 -1
  199. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  200. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  201. evalscope/benchmarks/siqa/__init__.py +0 -0
  202. evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
  203. evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
  204. evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
  205. evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
  206. evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
  207. evalscope/benchmarks/tau_bench/{generation.py → tau_bench/generation.py} +1 -1
  208. evalscope/benchmarks/tau_bench/{tau_bench_adapter.py → tau_bench/tau_bench_adapter.py} +29 -29
  209. evalscope/benchmarks/text2image/__init__.py +0 -0
  210. evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
  211. evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
  212. evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
  213. evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
  214. evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
  215. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +3 -3
  216. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
  217. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  218. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  219. evalscope/benchmarks/wmt/__init__.py +0 -0
  220. evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
  221. evalscope/benchmarks/zerobench/__init__.py +0 -0
  222. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  223. evalscope/cli/start_app.py +7 -1
  224. evalscope/cli/start_perf.py +7 -1
  225. evalscope/config.py +103 -18
  226. evalscope/constants.py +18 -0
  227. evalscope/evaluator/evaluator.py +138 -82
  228. evalscope/metrics/bert_score/__init__.py +0 -0
  229. evalscope/metrics/bert_score/scorer.py +338 -0
  230. evalscope/metrics/bert_score/utils.py +697 -0
  231. evalscope/metrics/llm_judge.py +19 -7
  232. evalscope/metrics/math_parser.py +14 -0
  233. evalscope/metrics/metric.py +317 -13
  234. evalscope/metrics/metrics.py +37 -0
  235. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  236. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  237. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  238. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  239. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  240. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  241. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  242. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  243. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  244. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  245. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  246. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  247. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  248. evalscope/models/image_edit_model.py +125 -0
  249. evalscope/models/model_apis.py +22 -0
  250. evalscope/models/openai_compatible.py +21 -0
  251. evalscope/models/text2image_model.py +2 -2
  252. evalscope/models/utils/openai.py +16 -6
  253. evalscope/perf/arguments.py +26 -4
  254. evalscope/perf/benchmark.py +76 -89
  255. evalscope/perf/http_client.py +31 -16
  256. evalscope/perf/main.py +15 -2
  257. evalscope/perf/plugin/api/base.py +9 -7
  258. evalscope/perf/plugin/api/custom_api.py +13 -58
  259. evalscope/perf/plugin/api/default_api.py +188 -79
  260. evalscope/perf/plugin/api/openai_api.py +85 -20
  261. evalscope/perf/plugin/datasets/base.py +21 -0
  262. evalscope/perf/plugin/datasets/custom.py +2 -3
  263. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  264. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  265. evalscope/perf/plugin/datasets/line_by_line.py +2 -3
  266. evalscope/perf/plugin/datasets/longalpaca.py +2 -3
  267. evalscope/perf/plugin/datasets/openqa.py +2 -4
  268. evalscope/perf/plugin/datasets/random_dataset.py +1 -3
  269. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  270. evalscope/perf/utils/benchmark_util.py +43 -27
  271. evalscope/perf/utils/db_util.py +14 -19
  272. evalscope/perf/utils/local_server.py +3 -44
  273. evalscope/perf/utils/log_utils.py +21 -6
  274. evalscope/report/__init__.py +13 -3
  275. evalscope/report/combinator.py +91 -20
  276. evalscope/report/generator.py +8 -87
  277. evalscope/report/report.py +8 -4
  278. evalscope/run.py +13 -5
  279. evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  280. evalscope/utils/argument_utils.py +1 -1
  281. evalscope/utils/chat_service.py +1 -1
  282. evalscope/utils/function_utils.py +249 -12
  283. evalscope/utils/import_utils.py +73 -1
  284. evalscope/utils/io_utils.py +132 -7
  285. evalscope/utils/json_schema.py +25 -2
  286. evalscope/utils/logger.py +69 -18
  287. evalscope/utils/model_utils.py +4 -3
  288. evalscope/utils/multi_choices.py +39 -7
  289. evalscope/utils/ner.py +377 -0
  290. evalscope/version.py +2 -2
  291. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/METADATA +252 -408
  292. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/RECORD +290 -154
  293. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
  294. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
  295. evalscope/api/mixin/dataset_mixin.py +0 -105
  296. evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
  297. tests/__init__.py +0 -1
  298. tests/aigc/__init__.py +0 -1
  299. tests/aigc/test_t2i.py +0 -142
  300. tests/benchmark/__init__.py +0 -1
  301. tests/benchmark/test_eval.py +0 -386
  302. tests/cli/__init__.py +0 -1
  303. tests/cli/test_all.py +0 -229
  304. tests/cli/test_collection.py +0 -96
  305. tests/cli/test_custom.py +0 -268
  306. tests/perf/__init__.py +0 -1
  307. tests/perf/test_perf.py +0 -176
  308. tests/rag/test_clip_benchmark.py +0 -90
  309. tests/rag/test_mteb.py +0 -213
  310. tests/rag/test_ragas.py +0 -128
  311. tests/swift/__init__.py +0 -1
  312. tests/swift/test_run_swift_eval.py +0 -146
  313. tests/swift/test_run_swift_vlm_eval.py +0 -128
  314. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  315. tests/test_run_all.py +0 -12
  316. tests/utils.py +0 -13
  317. tests/vlm/__init__.py +0 -1
  318. tests/vlm/test_vlmeval.py +0 -102
  319. /evalscope/benchmarks/{aigc → aa_lcr}/__init__.py +0 -0
  320. /evalscope/benchmarks/{aigc/i2i → ai2d}/__init__.py +0 -0
  321. /evalscope/benchmarks/{aigc/t2i → amc}/__init__.py +0 -0
  322. {tests/rag → evalscope/benchmarks/bfcl/v3}/__init__.py +0 -0
  323. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
  324. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py
@@ -1,3 +1,4 @@
+ # flake8: noqa: E501
  from typing import Any, Dict

  from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
@@ -7,7 +8,7 @@ from evalscope.api.messages.chat_message import ChatMessageUser
  from evalscope.api.metric import Score
  from evalscope.api.registry import register_benchmark
  from evalscope.constants import Tags
- from evalscope.utils.io_utils import convert_numpy_types
+ from evalscope.utils.io_utils import convert_normal_types
  from evalscope.utils.logger import get_logger

  logger = get_logger()
@@ -19,17 +20,18 @@ logger = get_logger()
  pretty_name='Live-Code-Bench',
  tags=[Tags.CODING],
  description=
- 'Live Code Bench is a benchmark for evaluating code generation models on real-world coding tasks. It includes a variety of programming problems with test cases to assess the model\'s ability to generate correct and efficient code solutions.', # noqa: E501
+ 'Live Code Bench is a benchmark for evaluating code generation models on real-world coding tasks. It includes a variety of programming problems with test cases to assess the model\'s ability to generate correct and efficient code solutions. '
+ '**By default the code is executed in local environment. We recommend using sandbox execution to safely run and evaluate the generated code, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/sandbox.html) for more details.**',
  dataset_id='AI-ModelScope/code_generation_lite',
  subset_list=['release_latest'],
- metric_list=['Pass@1'],
+ aggregation='mean_and_pass_at_k',
  eval_split='test',
  prompt_template=
  '### Question:\n{question_content}\n\n{format_prompt} ### Answer: (use the provided format with backticks)\n\n',
+ review_timeout=6,
  extra_params={
  'start_date': None,
  'end_date': None,
- 'timeout': 6,
  'debug': False
  },
  )
@@ -42,7 +44,6 @@ class LiveCodeBenchAdapter(DefaultDataAdapter):
  def __init__(self, **kwargs):
  super().__init__(**kwargs)

- self.timeout = self.extra_params.get('timeout', 6)
  self.debug = self.extra_params.get('debug', False)
  self.start_date = self.extra_params.get('start_date')
  self.end_date = self.extra_params.get('end_date')
@@ -81,58 +82,69 @@ class LiveCodeBenchAdapter(DefaultDataAdapter):
  def match_score(
  self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
  ) -> Score:
- from .evaluate_utils import codegen_metrics
-
  score = Score(
  extracted_prediction=filtered_prediction,
  prediction=original_prediction,
  )

- references = [{'input_output': task_state.metadata['evaluation_sample']}]
- predictions = [[filtered_prediction]]
-
- try:
- metrics, eval_results, final_metadata = codegen_metrics(
- references,
- predictions,
- k_list=[1],
- num_process_evaluate=1,
- timeout=self.timeout,
- debug=self.debug,
- )
- pass_rate = metrics['pass@1'] / 100 # convert to point scale
-
- score.value = {'pass': float(pass_rate > 0)}
- score.explanation = f"Pass@1: {metrics['pass@1']}%"
-
- # Convert numpy types to native Python types for JSON serialization
- serializable_eval_results = convert_numpy_types(eval_results)
- serializable_final_metadata = convert_numpy_types(final_metadata)
-
- score.metadata = {
- 'pass_rate': float(pass_rate),
- 'timeout': self.timeout,
- 'debug': self.debug,
- 'eval_results': serializable_eval_results,
- 'final_metadata': serializable_final_metadata
- }
- except Exception as e:
- score.value = {'pass': False}
- score.explanation = f'Evaluation failed: {str(e)}'
- score.metadata = {'error': str(e)}
-
- score.main_score_name = 'pass'
+ if not self.use_sandbox:
+ # Use original evaluation method
+ from .evaluate_utils import codegen_metrics
+
+ references = [{'input_output': task_state.metadata['evaluation_sample']}]
+ predictions = [[filtered_prediction]]
+
+ try:
+ metrics, eval_results, final_metadata = codegen_metrics(
+ references,
+ predictions,
+ k_list=[1],
+ num_process_evaluate=1,
+ timeout=self.review_timeout,
+ debug=self.debug,
+ )
+ pass_rate = metrics['pass@1'] / 100 # convert to point scale
+
+ score.value = {'acc': float(pass_rate > 0)}
+ score.explanation = f"Pass@1: {metrics['pass@1']}%"
+
+ # Convert numpy types to native Python types for JSON serialization
+ serializable_eval_results = convert_normal_types(eval_results)
+ serializable_final_metadata = convert_normal_types(final_metadata)
+
+ score.metadata = {
+ 'pass_rate': float(pass_rate),
+ 'timeout': self.review_timeout,
+ 'debug': self.debug,
+ 'eval_results': serializable_eval_results,
+ 'final_metadata': serializable_final_metadata
+ }
+ except Exception as e:
+ score.value = {'acc': False}
+ score.explanation = f'Evaluation failed: {str(e)}'
+ score.metadata = {'error': str(e)}
+ else:
+ # Use sandbox execution
+ try:
+ from .sandbox_evaluate_utils import evaluate_in_sandbox
+
+ evaluation_sample = task_state.metadata['evaluation_sample']
+ passed, detailed_results = evaluate_in_sandbox(
+ self, filtered_prediction, evaluation_sample, timeout=self.review_timeout, debug=self.debug
+ )
+
+ score.value = {'acc': passed}
+ score.explanation = f"Sandbox execution: {'Passed' if passed else 'Failed'}"
+ score.metadata = {
+ 'timeout': self.review_timeout,
+ 'debug': self.debug,
+ 'execution_method': 'sandbox',
+ 'detailed_results': detailed_results
+ }
+ except Exception as e:
+ score.value = {'acc': False}
+ score.explanation = f'Sandbox evaluation failed: {str(e)}'
+ score.metadata = {'error': str(e), 'execution_method': 'sandbox'}
+
+ score.main_score_name = 'acc'
  return score
-
- def aggregate_scores(self, sample_scores):
- from evalscope.metrics.metric import PassAtK
-
- # calculate pass@k here
- agg_list = []
- for metric in self.metric_list:
- if metric.lower().startswith('pass@'):
- k = int(metric.split('@')[1])
- # Get the scores for this metric
- agg = PassAtK(k)
- agg_list.extend(agg(sample_scores))
- return agg_list
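The hunk above moves the execution timeout from extra_params['timeout'] to the benchmark-level review_timeout field, switches the per-sample metric key from 'pass' to 'acc', and replaces the hand-rolled aggregate_scores with the declarative aggregation='mean_and_pass_at_k'. Below is a minimal sketch of how the updated options might be passed through evalscope's usual TaskConfig/run_task entry point; the registered benchmark name 'live_code_bench', the model id, and the ability to override review_timeout via dataset_args are assumptions, not confirmed by this diff.

# Hedged sketch, not the library's documented usage for this benchmark.
from evalscope import TaskConfig, run_task

task = TaskConfig(
    model='Qwen/Qwen2.5-Coder-7B-Instruct',    # hypothetical model id
    datasets=['live_code_bench'],              # assumed registered benchmark name
    dataset_args={
        'live_code_bench': {
            'review_timeout': 6,               # replaces the old extra_params['timeout']
            'extra_params': {'start_date': None, 'end_date': None, 'debug': False},
        }
    },
)
run_task(task)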
evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py
@@ -0,0 +1,220 @@
+ import json
+ from typing import TYPE_CHECKING, Dict, List, Tuple
+
+ from evalscope.utils.logger import get_logger
+
+ if TYPE_CHECKING:
+ from evalscope.api.mixin.sandbox_mixin import SandboxMixin
+
+ logger = get_logger()
+
+
+ def evaluate_in_sandbox(
+ adapter: 'SandboxMixin',
+ code: str,
+ evaluation_sample: str,
+ timeout: int = 6,
+ debug: bool = False
+ ) -> Tuple[bool, Dict]:
+ """
+ Evaluate code in sandbox environment for Live Code Bench.
+
+ Args:
+ adapter: The adapter instance with sandbox capabilities
+ code: The code to evaluate
+ evaluation_sample: JSON string containing input/output test cases
+ timeout: Timeout for execution
+ debug: Whether to enable debug logging
+
+ Returns:
+ Tuple[bool, Dict]: (overall_pass, detailed_results)
+ """
+ try:
+ # Parse the evaluation sample
+ test_data = json.loads(evaluation_sample)
+ inputs = test_data.get('inputs', [])
+ outputs = test_data.get('outputs', [])
+ fn_name = test_data.get('fn_name')
+
+ if debug:
+ logger.info(f'Evaluating code with {len(inputs)} test cases')
+ logger.info(f'Function name: {fn_name}')
+
+ # Determine if this is call-based or stdio-based
+ if fn_name:
+ # Call-based evaluation
+ return _evaluate_call_based_in_sandbox(adapter, code, inputs, outputs, fn_name, timeout, debug)
+ else:
+ # Standard input/output evaluation
+ return _evaluate_stdio_in_sandbox(adapter, code, inputs, outputs, timeout, debug)
+
+ except Exception as e:
+ if debug:
+ logger.error(f'Sandbox evaluation error: {str(e)}')
+ return False, {'error': str(e), 'total_tests': 0, 'passed_tests': 0}
+
+
+ def _evaluate_call_based_in_sandbox(
+ adapter: 'SandboxMixin', code: str, inputs: list, outputs: list, fn_name: str, timeout: int, debug: bool
+ ) -> Tuple[bool, Dict]:
+ """Evaluate call-based problems in sandbox."""
+ try:
+ all_passed = True
+ passed_count = 0
+ failed_cases = []
+
+ for i, (test_input, expected_output) in enumerate(zip(inputs, outputs)):
+ # Prepare individual test code for each test case
+ test_code = f"""
+ import json
+ import sys
+
+ # User's code
+ {code}
+
+ # Test execution for single test case
+ try:
+ test_input = {repr(test_input)}
+ expected_output = {repr(expected_output)}
+
+ if 'class Solution' in '''{code}''':
+ # LeetCode style
+ solution = Solution()
+ method = getattr(solution, '{fn_name}')
+ else:
+ # Function is directly available
+ method = {fn_name}
+
+ # Parse input if it's JSON string
+ if isinstance(test_input, str):
+ try:
+ test_input = json.loads(test_input)
+ except:
+ pass # Keep as string if not valid JSON
+
+ # Call the method
+ if isinstance(test_input, list):
+ result = method(*test_input)
+ else:
+ result = method(test_input)
+
+ # Parse expected output if it's JSON string
+ if isinstance(expected_output, str):
+ try:
+ expected_output = json.loads(expected_output)
+ except:
+ pass # Keep as string if not valid JSON
+
+ # Convert tuple to list for comparison
+ if isinstance(result, tuple):
+ result = list(result)
+
+ if result == expected_output:
+ print("TEST_PASSED")
+ else:
+ print(f"TEST_FAILED: expected {{expected_output}}, got {{result}}")
+
+ except Exception as e:
+ print(f"EXECUTION_ERROR: {{str(e)}}")
+ import traceback
+ traceback.print_exc()
+ """
+
+ # Execute in sandbox
+ result = adapter.execute_code_in_sandbox(code=test_code, timeout=timeout, language='python')
+
+ if debug:
+ logger.info(f'Test case {i} execution result: {result}')
+
+ # Check if execution was successful and test passed
+ if result.get('status') == 'success':
+ output = result.get('output', '')
+ if 'TEST_PASSED' in output:
+ passed_count += 1
+ elif 'TEST_FAILED:' in output:
+ # Extract failure details from output
+ for line in output.split('\n'):
+ if line.startswith('TEST_FAILED:'):
+ failed_cases.append(f"Test {i}: {line.replace('TEST_FAILED: ', '')}")
+ break
+ all_passed = False
+ break
+ elif 'EXECUTION_ERROR:' in output:
+ # Extract error details
+ for line in output.split('\n'):
+ if line.startswith('EXECUTION_ERROR:'):
+ failed_cases.append(f'Test {i}: {line}')
+ break
+ all_passed = False
+ break
+ else:
+ failed_cases.append(f'Test {i}: Unknown error in output. Result: {result}')
+ all_passed = False
+ break
+ else:
+ failed_cases.append(f'Test {i}: Sandbox execution failed - Result: {result}')
+ all_passed = False
+ break
+
+ detailed_results = {'total_tests': len(inputs), 'passed_tests': passed_count, 'failed_cases': failed_cases}
+
+ return all_passed, detailed_results
+
+ except Exception as e:
+ if debug:
+ logger.error(f'Call-based evaluation error: {str(e)}')
+ return False, {'error': str(e), 'total_tests': len(inputs), 'passed_tests': 0}
+
+
+ def _evaluate_stdio_in_sandbox(
+ adapter: 'SandboxMixin', code: str, inputs: list, outputs: list, timeout: int, debug: bool
+ ) -> Tuple[bool, Dict]:
+ """Evaluate stdio-based problems in sandbox."""
+ try:
+ all_passed = True
+ passed_count = 0
+ failed_cases = []
+
+ for i, (test_input, expected_output) in enumerate(zip(inputs, outputs)):
+ test_code = f"""
+ import sys
+ from io import StringIO
+
+ # Redirect stdin
+ sys.stdin = StringIO('''{test_input}''')
+
+ # User's code
+ {code}
+ """
+
+ # Execute in sandbox
+ result = adapter.execute_code_in_sandbox(code=test_code, timeout=timeout, language='python')
+
+ if result.get('status') != 'success':
+ if debug:
+ logger.error(f'Test case {i} execution failed: {result}')
+ failed_cases.append(f'Test {i}: Execution error - Result: {result}')
+ all_passed = False
+ break
+
+ # Compare output
+ actual_output = result.get('output', '').strip()
+ expected_output = expected_output.strip()
+
+ if actual_output == expected_output:
+ passed_count += 1
+ else:
+ if debug:
+ logger.info(f"Test case {i} failed: expected '{expected_output}', got '{actual_output}'")
+ failed_cases.append(f"Test {i}: Expected '{expected_output}', got '{actual_output}'")
+ all_passed = False
+ break
+
+ detailed_results = {'total_tests': len(inputs), 'passed_tests': passed_count, 'failed_cases': failed_cases}
+
+ return all_passed, detailed_results
+
+ except Exception as e:
+ if debug:
+ logger.error(f'Stdio evaluation error: {str(e)}')
+ return False, {'error': str(e), 'total_tests': len(inputs), 'passed_tests': 0}
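The new helper only relies on a narrow contract: the adapter must expose execute_code_in_sandbox(code, timeout, language) returning a dict with 'status' and 'output', and evaluation_sample must be a JSON string with 'inputs', 'outputs' and an optional 'fn_name'. A minimal sketch of that contract follows; the LocalStubAdapter is hypothetical (it runs code in a local subprocess, not a real SandboxMixin-backed sandbox) and is only meant to illustrate the inputs the new module expects.

# Hedged sketch of the evaluate_in_sandbox contract with a hypothetical stub adapter.
import json
import subprocess
import sys

from evalscope.benchmarks.live_code_bench.sandbox_evaluate_utils import evaluate_in_sandbox


class LocalStubAdapter:
    """Hypothetical stand-in for a SandboxMixin-backed adapter."""

    def execute_code_in_sandbox(self, code: str, timeout: int, language: str = 'python') -> dict:
        # Run the generated test script locally; a real adapter would dispatch to a sandbox.
        proc = subprocess.run([sys.executable, '-c', code], capture_output=True, text=True, timeout=timeout)
        return {'status': 'success' if proc.returncode == 0 else 'error', 'output': proc.stdout}


# Call-based sample: 'fn_name' is set, so _evaluate_call_based_in_sandbox is used.
evaluation_sample = json.dumps({
    'fn_name': 'add',
    'inputs': [[1, 2], [3, 4]],
    'outputs': [3, 7],
})
code = 'def add(a, b):\n    return a + b'
passed, details = evaluate_in_sandbox(LocalStubAdapter(), code, evaluation_sample, timeout=6, debug=True)
print(passed, details)  # expected: True with total_tests=2, passed_tests=2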
File without changes
evalscope/benchmarks/logi_qa/logi_qa_adapter.py
@@ -0,0 +1,41 @@
+ # flake8: noqa: E501
+
+ from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+
+ DESCRIPTION = 'LogiQA is a dataset sourced from expert-written questions for testing human Logical reasoning.'
+
+ PROMPT_TEMPLATE = r"""
+ Answer the following multiple choice question. The entire content of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of {letters}.
+
+ {question}
+
+ {choices}
+ """.strip()
+
+
+ @register_benchmark(
+ BenchmarkMeta(
+ name='logi_qa',
+ pretty_name='LogiQA',
+ tags=[Tags.REASONING, Tags.MULTIPLE_CHOICE],
+ description=DESCRIPTION.strip(),
+ dataset_id='extraordinarylab/logiqa',
+ metric_list=['acc'],
+ few_shot_num=0,
+ train_split='validation',
+ eval_split='test',
+ prompt_template=PROMPT_TEMPLATE,
+ )
+ )
+ class LogiQAAdapter(MultiChoiceAdapter):
+
+ def record_to_sample(self, record) -> Sample:
+ return Sample(
+ input=f"{record['context']}\n{record['question']}",
+ choices=record['choices'],
+ target=record['answer'],
+ metadata={},
+ )
evalscope/benchmarks/math_500/math_500_adapter.py
@@ -4,7 +4,6 @@ from typing import Any, Dict

  from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
  from evalscope.api.dataset import Sample
- from evalscope.api.evaluator import TaskState
  from evalscope.api.registry import register_benchmark
  from evalscope.constants import Tags
  from evalscope.utils.logger import get_logger
@@ -49,3 +48,8 @@ class Math500Adapter(DefaultDataAdapter):
  'solution': record['solution'],
  },
  )
+
+ def extract_answer(self, prediction: str, task_state):
+ from evalscope.metrics.math_parser import extract_answer
+
+ return extract_answer(prediction)
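The new extract_answer override delegates answer extraction to the shared math parser instead of adapter-local logic. A brief hedged illustration of the intended effect follows; the exact parsing rules of evalscope.metrics.math_parser.extract_answer are not shown in this diff, so the assumption that it returns the final boxed expression of a chain-of-thought answer is just that, an assumption.

# Hedged illustration only: assumes extract_answer returns the last \boxed{...} content.
from evalscope.metrics.math_parser import extract_answer

prediction = 'The roots sum to 3 + 4 = 7, so the answer is \\boxed{7}.'
print(extract_answer(prediction))  # expected: '7' under the stated assumption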
File without changes
evalscope/benchmarks/math_qa/math_qa_adapter.py
@@ -0,0 +1,35 @@
+ from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+ from evalscope.utils.multi_choices import MultipleChoiceTemplate
+
+ DESCRIPTION = (
+ 'MathQA dataset is gathered by using a new representation language to annotate over the '
+ 'AQuA-RAT dataset with fully-specified operational programs.'
+ )
+
+
+ @register_benchmark(
+ BenchmarkMeta(
+ name='math_qa',
+ pretty_name='MathQA',
+ tags=[Tags.REASONING, Tags.MATH, Tags.MULTIPLE_CHOICE],
+ description=DESCRIPTION.strip(),
+ dataset_id='extraordinarylab/math-qa',
+ metric_list=['acc'],
+ few_shot_num=0,
+ train_split=None,
+ eval_split='test',
+ prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER_COT,
+ )
+ )
+ class MathQAAdapter(MultiChoiceAdapter):
+
+ def record_to_sample(self, record) -> Sample:
+ return Sample(
+ input=record['question'],
+ choices=record['choices'],
+ target=record['answer'],
+ metadata={'reasoning': record['reasoning']},
+ )
File without changes
evalscope/benchmarks/math_verse/math_verse_adapter.py
@@ -0,0 +1,105 @@
+ # flake8: noqa: E501
+ from typing import Any, Dict
+
+ from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+ from evalscope.utils.io_utils import bytes_to_base64
+ from evalscope.utils.logger import get_logger
+
+ logger = get_logger()
+
+ MULTI_CHOICE_TYPE = 'multi-choice'
+ OPEN_TYPE = 'free-form'
+
+ OPEN_PROMPT = '{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.'
+
+ MULT_CHOICE_PROMPT = """
+ Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of A, B, C, D. Think step by step before answering.
+
+ {question}
+ """
+
+ SUBSET_LIST = ['Text Dominant', 'Text Lite', 'Vision Intensive', 'Vision Dominant', 'Vision Only']
+
+
+ @register_benchmark(
+ BenchmarkMeta(
+ name='math_verse',
+ pretty_name='MathVerse',
+ dataset_id='evalscope/MathVerse',
+ tags=[Tags.MATH, Tags.REASONING, Tags.MULTIPLE_CHOICE, Tags.MULTI_MODAL],
+ description=
+ 'MathVerse, an all-around visual math benchmark designed for an equitable and in-depth evaluation of MLLMs. 2,612 high-quality, multi-subject math problems with diagrams from publicly available sources. Each problem is then transformed by human annotators into six distinct versions, each offering varying degrees of information content in multi-modality, contributing to 15K test samples in total. This approach allows MathVerse to comprehensively assess whether and how much MLLMs can truly understand the visual diagrams for mathematical reasoning.',
+ subset_list=SUBSET_LIST,
+ metric_list=[{
+ 'acc': {
+ 'numeric': True
+ }
+ }],
+ default_subset='testmini',
+ eval_split='testmini',
+ prompt_template=OPEN_PROMPT,
+ )
+ )
+ class MathVerseAdapter(VisionLanguageAdapter):
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.reformat_subset = True
+ self._use_llm_judge = True
+
+ def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+ """
+ Convert a dataset record to a Sample. Unifies handling for both multi-choice and free-form.
+ Builds the content list inline and appends image content if provided.
+
+ Args:
+ record: Raw dataset record.
+
+ Returns:
+ Sample: The standardized sample ready for evaluation.
+ """
+ question_type = record.get('question_type', OPEN_TYPE)
+ question: str = record.get('question', '')
+ content_list: list[Content] = []
+
+ # Choose prompt text based on type; keep a single unified flow for creating Sample
+ if question_type == MULTI_CHOICE_TYPE:
+ prompt_text = MULT_CHOICE_PROMPT.format(question=question).strip()
+ else:
+ prompt_text = OPEN_PROMPT.format(question=question).strip()
+
+ content_list.append(ContentText(text=prompt_text))
+
+ # Append image if exists
+ image = record.get('image')
+ if image and isinstance(image, dict):
+ image_bytes = image.get('bytes')
+ if image_bytes:
+ image_base64 = bytes_to_base64(image_bytes, format='png', add_header=True)
+ content_list.append(ContentImage(image=image_base64))
+
+ metadata: Dict[str, Any] = {
+ 'sample_index': record.get('sample_index'),
+ 'problem_index': record.get('problem_index'),
+ 'problem_version': record.get('problem_version'),
+ 'question_type': question_type,
+ 'query_wo': record.get('query_wo'),
+ 'query_cot': record.get('query_cot'),
+ 'question_for_eval': record.get('question_for_eval'),
+ }
+
+ return Sample(
+ input=[ChatMessageUser(content=content_list)],
+ target=record['answer'],
+ subset_key=record['problem_version'],
+ metadata=metadata,
+ )
+
+ def extract_answer(self, prediction: str, task_state):
+ from evalscope.metrics.math_parser import extract_answer
+
+ return extract_answer(prediction)
File without changes