evalscope 0.17.1__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of evalscope might be problematic.

Files changed (302)
  1. evalscope/__init__.py +4 -1
  2. evalscope/api/benchmark/__init__.py +3 -0
  3. evalscope/api/benchmark/adapters/__init__.py +5 -0
  4. evalscope/api/benchmark/adapters/default_data_adapter.py +684 -0
  5. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  6. evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
  7. evalscope/api/benchmark/adapters/text2image_adapter.py +156 -0
  8. evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
  9. evalscope/api/benchmark/benchmark.py +356 -0
  10. evalscope/api/benchmark/meta.py +121 -0
  11. evalscope/api/dataset/__init__.py +2 -0
  12. evalscope/api/dataset/dataset.py +349 -0
  13. evalscope/api/dataset/loader.py +262 -0
  14. evalscope/api/dataset/utils.py +143 -0
  15. evalscope/api/evaluator/__init__.py +3 -0
  16. evalscope/api/evaluator/cache.py +378 -0
  17. evalscope/api/evaluator/evaluator.py +56 -0
  18. evalscope/api/evaluator/state.py +275 -0
  19. evalscope/api/filter/__init__.py +1 -0
  20. evalscope/api/filter/filter.py +72 -0
  21. evalscope/api/messages/__init__.py +12 -0
  22. evalscope/api/messages/chat_message.py +243 -0
  23. evalscope/api/messages/content.py +102 -0
  24. evalscope/api/messages/utils.py +35 -0
  25. evalscope/api/metric/__init__.py +2 -0
  26. evalscope/api/metric/metric.py +55 -0
  27. evalscope/api/metric/scorer.py +113 -0
  28. evalscope/api/mixin/__init__.py +1 -0
  29. evalscope/api/mixin/llm_judge_mixin.py +168 -0
  30. evalscope/api/model/__init__.py +12 -0
  31. evalscope/api/model/generate_config.py +155 -0
  32. evalscope/api/model/model.py +386 -0
  33. evalscope/api/model/model_output.py +285 -0
  34. evalscope/api/registry.py +182 -0
  35. evalscope/api/tool/__init__.py +3 -0
  36. evalscope/api/tool/tool_call.py +101 -0
  37. evalscope/api/tool/tool_info.py +173 -0
  38. evalscope/api/tool/utils.py +64 -0
  39. evalscope/app/app.py +3 -0
  40. evalscope/app/ui/app_ui.py +2 -1
  41. evalscope/app/ui/multi_model.py +50 -25
  42. evalscope/app/ui/single_model.py +26 -14
  43. evalscope/app/utils/data_utils.py +43 -27
  44. evalscope/app/utils/env_utils.py +12 -0
  45. evalscope/app/utils/text_utils.py +14 -14
  46. evalscope/app/utils/visualization.py +9 -4
  47. evalscope/arguments.py +7 -10
  48. evalscope/backend/opencompass/api_meta_template.py +2 -1
  49. evalscope/backend/opencompass/backend_manager.py +6 -5
  50. evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
  51. evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
  52. evalscope/backend/rag_eval/ragas/task_template.py +2 -1
  53. evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
  54. evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
  55. evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
  56. evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
  57. evalscope/backend/rag_eval/utils/embedding.py +10 -1
  58. evalscope/backend/rag_eval/utils/llm.py +13 -12
  59. evalscope/benchmarks/__init__.py +0 -2
  60. evalscope/benchmarks/aime/aime24_adapter.py +38 -40
  61. evalscope/benchmarks/aime/aime25_adapter.py +34 -40
  62. evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +86 -60
  63. evalscope/benchmarks/arc/arc_adapter.py +34 -147
  64. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70
  65. evalscope/benchmarks/arena_hard/utils.py +37 -1
  66. evalscope/benchmarks/bbh/bbh_adapter.py +72 -144
  67. evalscope/benchmarks/bfcl/bfcl_adapter.py +188 -171
  68. evalscope/benchmarks/bfcl/generation.py +222 -0
  69. evalscope/benchmarks/ceval/ceval_adapter.py +93 -162
  70. evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
  71. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -125
  72. evalscope/benchmarks/competition_math/competition_math_adapter.py +56 -108
  73. evalscope/benchmarks/data_collection/data_collection_adapter.py +187 -45
  74. evalscope/benchmarks/docmath/docmath_adapter.py +109 -51
  75. evalscope/benchmarks/docmath/utils.py +4 -5
  76. evalscope/benchmarks/drop/drop_adapter.py +88 -40
  77. evalscope/benchmarks/frames/frames_adapter.py +136 -52
  78. evalscope/benchmarks/general_arena/general_arena_adapter.py +140 -98
  79. evalscope/benchmarks/general_arena/utils.py +23 -27
  80. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +40 -101
  81. evalscope/benchmarks/general_qa/general_qa_adapter.py +73 -134
  82. evalscope/benchmarks/gpqa/gpqa_adapter.py +61 -100
  83. evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py} +12 -5
  84. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +62 -142
  85. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +35 -124
  86. evalscope/benchmarks/hle/hle_adapter.py +127 -93
  87. evalscope/benchmarks/humaneval/humaneval_adapter.py +86 -55
  88. evalscope/benchmarks/ifeval/ifeval_adapter.py +69 -40
  89. evalscope/benchmarks/ifeval/instructions.py +109 -64
  90. evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
  91. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  92. evalscope/benchmarks/ifeval/utils.py +6 -7
  93. evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
  94. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  95. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  96. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  97. evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -65
  98. evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
  99. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +121 -71
  100. evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
  101. evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
  102. evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +49 -75
  103. evalscope/benchmarks/math_500/math_500_adapter.py +41 -48
  104. evalscope/benchmarks/math_vista/__init__.py +0 -0
  105. evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
  106. evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -205
  107. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +80 -99
  108. evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +64 -110
  109. evalscope/benchmarks/mmmu/__init__.py +0 -0
  110. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  111. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  112. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
  113. evalscope/benchmarks/musr/musr_adapter.py +33 -64
  114. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +196 -152
  115. evalscope/benchmarks/process_bench/process_bench_adapter.py +144 -76
  116. evalscope/benchmarks/race/race_adapter.py +33 -119
  117. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
  118. evalscope/benchmarks/super_gpqa/{five_shot_prompt.txt → prompt.py} +14 -16
  119. evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +73 -117
  120. evalscope/benchmarks/super_gpqa/utils.py +2 -1
  121. evalscope/benchmarks/tau_bench/generation.py +147 -0
  122. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +114 -60
  123. evalscope/benchmarks/text2image/__init__.py +0 -0
  124. evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
  125. evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
  126. evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
  127. evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
  128. evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
  129. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +91 -70
  130. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -124
  131. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -266
  132. evalscope/benchmarks/winogrande/winogrande_adapter.py +28 -54
  133. evalscope/cli/cli.py +2 -0
  134. evalscope/cli/start_app.py +7 -1
  135. evalscope/cli/start_perf.py +7 -1
  136. evalscope/cli/start_server.py +6 -3
  137. evalscope/collections/__init__.py +2 -10
  138. evalscope/collections/sampler.py +10 -10
  139. evalscope/collections/schema.py +13 -11
  140. evalscope/config.py +157 -57
  141. evalscope/constants.py +37 -61
  142. evalscope/evaluator/__init__.py +1 -1
  143. evalscope/evaluator/evaluator.py +275 -419
  144. evalscope/filters/__init__.py +2 -0
  145. evalscope/filters/extraction.py +126 -0
  146. evalscope/filters/selection.py +57 -0
  147. evalscope/metrics/__init__.py +13 -13
  148. evalscope/metrics/llm_judge.py +47 -33
  149. evalscope/metrics/math_parser.py +27 -22
  150. evalscope/metrics/metric.py +307 -0
  151. evalscope/metrics/metrics.py +22 -18
  152. evalscope/metrics/t2v_metrics/__init__.py +0 -52
  153. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
  154. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
  155. evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
  156. evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
  157. evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
  158. evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
  159. evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
  160. evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
  161. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
  162. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
  163. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
  164. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
  165. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
  166. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
  167. evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
  168. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
  169. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
  170. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
  171. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
  172. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
  173. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
  174. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
  175. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
  176. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
  177. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
  178. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
  179. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
  180. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
  181. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
  182. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
  183. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
  184. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
  185. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
  186. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
  187. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
  188. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
  189. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
  190. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
  191. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
  192. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
  193. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
  194. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
  195. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
  196. evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
  197. evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
  198. evalscope/models/__init__.py +6 -29
  199. evalscope/models/image_edit_model.py +125 -0
  200. evalscope/models/mockllm.py +65 -0
  201. evalscope/models/model_apis.py +67 -0
  202. evalscope/models/modelscope.py +455 -0
  203. evalscope/models/openai_compatible.py +126 -0
  204. evalscope/models/text2image_model.py +124 -0
  205. evalscope/models/utils/openai.py +701 -0
  206. evalscope/perf/benchmark.py +4 -1
  207. evalscope/perf/http_client.py +4 -2
  208. evalscope/perf/plugin/api/custom_api.py +5 -4
  209. evalscope/perf/plugin/api/openai_api.py +11 -9
  210. evalscope/perf/plugin/datasets/custom.py +2 -1
  211. evalscope/perf/plugin/datasets/flickr8k.py +1 -1
  212. evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
  213. evalscope/perf/plugin/datasets/line_by_line.py +2 -1
  214. evalscope/perf/plugin/datasets/longalpaca.py +2 -1
  215. evalscope/perf/plugin/datasets/openqa.py +4 -2
  216. evalscope/perf/utils/benchmark_util.py +15 -10
  217. evalscope/perf/utils/db_util.py +9 -6
  218. evalscope/perf/utils/local_server.py +11 -3
  219. evalscope/perf/utils/rich_display.py +16 -10
  220. evalscope/report/__init__.py +2 -3
  221. evalscope/report/combinator.py +18 -12
  222. evalscope/report/generator.py +51 -35
  223. evalscope/report/{utils.py → report.py} +8 -6
  224. evalscope/run.py +33 -47
  225. evalscope/summarizer.py +1 -1
  226. evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  227. evalscope/utils/__init__.py +21 -2
  228. evalscope/utils/chat_service.py +3 -2
  229. evalscope/utils/deprecation_utils.py +12 -1
  230. evalscope/utils/function_utils.py +29 -0
  231. evalscope/utils/import_utils.py +23 -1
  232. evalscope/utils/io_utils.py +142 -6
  233. evalscope/utils/json_schema.py +208 -0
  234. evalscope/utils/logger.py +51 -12
  235. evalscope/utils/model_utils.py +11 -7
  236. evalscope/utils/multi_choices.py +288 -0
  237. evalscope/utils/url_utils.py +65 -0
  238. evalscope/version.py +2 -2
  239. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/METADATA +108 -62
  240. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/RECORD +258 -226
  241. tests/benchmark/test_eval.py +385 -0
  242. tests/benchmark/test_image_edit.py +65 -0
  243. tests/{aigc → benchmark}/test_t2i.py +22 -4
  244. tests/benchmark/test_vlm.py +80 -0
  245. tests/cli/test_all.py +85 -47
  246. tests/cli/test_collection.py +20 -8
  247. tests/cli/test_custom.py +22 -15
  248. tests/cli/test_reasoning.py +81 -0
  249. tests/common.py +73 -0
  250. tests/perf/test_perf.py +4 -2
  251. tests/rag/test_clip_benchmark.py +0 -2
  252. evalscope/benchmarks/aigc/t2i/base.py +0 -56
  253. evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +0 -78
  254. evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +0 -58
  255. evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +0 -58
  256. evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +0 -57
  257. evalscope/benchmarks/aigc/t2i/tifa_adapter.py +0 -37
  258. evalscope/benchmarks/arc/ai2_arc.py +0 -151
  259. evalscope/benchmarks/benchmark.py +0 -81
  260. evalscope/benchmarks/ceval/ceval_exam.py +0 -146
  261. evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
  262. evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
  263. evalscope/benchmarks/competition_math/competition_math.py +0 -79
  264. evalscope/benchmarks/data_adapter.py +0 -528
  265. evalscope/benchmarks/filters.py +0 -59
  266. evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
  267. evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
  268. evalscope/benchmarks/humaneval/humaneval.py +0 -79
  269. evalscope/benchmarks/mmlu/mmlu.py +0 -160
  270. evalscope/benchmarks/mmlu/samples.jsonl +0 -5
  271. evalscope/benchmarks/process_bench/critique_template.txt +0 -13
  272. evalscope/benchmarks/race/race.py +0 -104
  273. evalscope/benchmarks/race/samples.jsonl +0 -5
  274. evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
  275. evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
  276. evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
  277. evalscope/benchmarks/utils.py +0 -60
  278. evalscope/collections/evaluator.py +0 -375
  279. evalscope/metrics/completion_parsers.py +0 -227
  280. evalscope/metrics/named_metrics.py +0 -55
  281. evalscope/models/adapters/__init__.py +0 -14
  282. evalscope/models/adapters/base_adapter.py +0 -84
  283. evalscope/models/adapters/bfcl_adapter.py +0 -246
  284. evalscope/models/adapters/chat_adapter.py +0 -207
  285. evalscope/models/adapters/choice_adapter.py +0 -222
  286. evalscope/models/adapters/custom_adapter.py +0 -71
  287. evalscope/models/adapters/server_adapter.py +0 -236
  288. evalscope/models/adapters/t2i_adapter.py +0 -79
  289. evalscope/models/adapters/tau_bench_adapter.py +0 -189
  290. evalscope/models/custom/__init__.py +0 -4
  291. evalscope/models/custom/custom_model.py +0 -50
  292. evalscope/models/custom/dummy_model.py +0 -99
  293. evalscope/models/local_model.py +0 -128
  294. evalscope/models/register.py +0 -41
  295. tests/cli/test_run.py +0 -489
  296. /evalscope/{benchmarks/aigc → api}/__init__.py +0 -0
  297. /evalscope/benchmarks/{aigc/t2i → image_edit}/__init__.py +0 -0
  298. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/LICENSE +0 -0
  299. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/WHEEL +0 -0
  300. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/entry_points.txt +0 -0
  301. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/top_level.txt +0 -0
  302. /tests/{aigc → benchmark}/__init__.py +0 -0
evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py

@@ -1,88 +1,138 @@
- from tqdm import tqdm
-
- from evalscope.benchmarks import Benchmark, DataAdapter
+ from typing import Any, Dict
+
+ from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.evaluator import TaskState
+ from evalscope.api.messages.chat_message import ChatMessageUser
+ from evalscope.api.metric import Score
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+ from evalscope.utils.io_utils import convert_numpy_types
  from evalscope.utils.logger import get_logger

  logger = get_logger()


- @Benchmark.register(
-     name='live_code_bench',
-     pretty_name='Live-Code-Bench',
-     tags=['Coding'],
-     description=
-     'Live Code Bench is a benchmark for evaluating code generation models on real-world coding tasks. It includes a variety of programming problems with test cases to assess the model\'s ability to generate correct and efficient code solutions.',  # noqa: E501
-     dataset_id='AI-ModelScope/code_generation_lite',
-     subset_list=['release_latest'],
-     metric_list=['Pass@1'],
-     few_shot_num=0,
-     train_split=None,
-     eval_split='test',
-     extra_params={
-         'start_date': None,
-         'end_date': None,
-         'timeout': 6,
-         'debug': False
-     },
-     system_prompt=
-     'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.',  # noqa: E501
-     prompt_template=
-     '### Question:\n{question_content}\n\n{format_prompt} ### Answer: (use the provided format with backticks)\n\n',  # noqa: E501
+ @register_benchmark(
+     BenchmarkMeta(
+         name='live_code_bench',
+         pretty_name='Live-Code-Bench',
+         tags=[Tags.CODING],
+         description=
+         'Live Code Bench is a benchmark for evaluating code generation models on real-world coding tasks. It includes a variety of programming problems with test cases to assess the model\'s ability to generate correct and efficient code solutions.',  # noqa: E501
+         dataset_id='AI-ModelScope/code_generation_lite',
+         subset_list=['release_latest'],
+         metric_list=['Pass@1'],
+         eval_split='test',
+         prompt_template=
+         '### Question:\n{question_content}\n\n{format_prompt} ### Answer: (use the provided format with backticks)\n\n',
+         extra_params={
+             'start_date': None,
+             'end_date': None,
+             'timeout': 6,
+             'debug': False
+         },
+     )
  )
- class LiveCodeBenchAdapter(DataAdapter):
+ class LiveCodeBenchAdapter(DefaultDataAdapter):
+     """
+     Live Code Bench adapter using the new data processing framework.
+     """

      def __init__(self, **kwargs):
          super().__init__(**kwargs)

-         extra_params = kwargs.get('extra_params', {})
-
-         self.timeout = extra_params.get('timeout', 6)
-         self.debug = extra_params.get('debug', False)
-         self.start_date = extra_params.get('start_date')
-         self.end_date = extra_params.get('end_date')
-
-     def load(self, **kwargs) -> dict:
-         from .load_utils import filter_date, transform
-
-         # Note: need trust_remote_code=True to load the python script
-         dataset_dict = super().load(trust_remote_code=True, **kwargs)
-         new_dataset_dict = {}
-         for subset_key, dataset in dataset_dict.items():
-             datasets = dataset[self.eval_split]
-             filtered_datasets = filter_date(datasets, start_date=self.start_date, end_date=self.end_date)
-
-             transformed_datasets = [transform(item) for item in tqdm(filtered_datasets, desc='Transforming data')]
-             new_dataset_dict[subset_key] = {self.eval_split: transformed_datasets}
-         return new_dataset_dict
-
-     def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
-         """
-         Generate the prompt for the model input.
-         """
-         format_prompt = input_d['format_prompt']
-         question_content = input_d['question_content']
+         self.timeout = self.extra_params.get('timeout', 6)
+         self.debug = self.extra_params.get('debug', False)
+         self.start_date = self.extra_params.get('start_date')
+         self.end_date = self.extra_params.get('end_date')
+
+         self.save_metadata = False  # Don't save metadata, since they are large
+
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         """Convert a data record to a Sample object."""
+         from .load_utils import transform
+
+         record = transform(record)
+
+         question_content = record['question_content']
+         format_prompt = record['format_prompt']
          full_prompt = self.prompt_template.format(question_content=question_content, format_prompt=format_prompt)

-         return self.gen_prompt_data(full_prompt)
+         return Sample(
+             input=[ChatMessageUser(content=full_prompt)],
+             target='',
+             metadata={
+                 'evaluation_sample': record['evaluation_sample'],
+                 'contest_date': record['contest_date']
+             }
+         )

-     def get_gold_answer(self, input_d: dict) -> str:
-         # Extract the gold answer from the input dict.
-         return input_d
+     def sample_filter(self, sample):
+         from .load_utils import filter_date

-     def match(self, gold: dict, pred: str) -> float:
-         from .evaluate_utils import codegen_metrics
+         return filter_date(sample.metadata['contest_date'], start_date=self.start_date, end_date=self.end_date)
+
+     def extract_answer(self, prediction: str, task_state: TaskState) -> str:
+         """Extract code from the prediction."""
          from .extract_utils import extract_code_generation
+         return extract_code_generation(prediction)

-         ext_pred = extract_code_generation(pred)
-
-         references = [{'input_output': gold['evaluation_sample']}]
-         predictions = [[ext_pred]]
-         metrics, eval_results, final_metadata = codegen_metrics(
-             references,
-             predictions,
-             k_list=[1],
-             num_process_evaluate=1,
-             timeout=self.timeout,
-             debug=self.debug,
+     def match_score(
+         self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
+     ) -> Score:
+         from .evaluate_utils import codegen_metrics
+
+         score = Score(
+             extracted_prediction=filtered_prediction,
+             prediction=original_prediction,
          )
-         return metrics['pass@1'] / 100  # convert to point scale
+
+         references = [{'input_output': task_state.metadata['evaluation_sample']}]
+         predictions = [[filtered_prediction]]
+
+         try:
+             metrics, eval_results, final_metadata = codegen_metrics(
+                 references,
+                 predictions,
+                 k_list=[1],
+                 num_process_evaluate=1,
+                 timeout=self.timeout,
+                 debug=self.debug,
+             )
+             pass_rate = metrics['pass@1'] / 100  # convert to point scale
+
+             score.value = {'pass': float(pass_rate > 0)}
+             score.explanation = f"Pass@1: {metrics['pass@1']}%"
+
+             # Convert numpy types to native Python types for JSON serialization
+             serializable_eval_results = convert_numpy_types(eval_results)
+             serializable_final_metadata = convert_numpy_types(final_metadata)
+
+             score.metadata = {
+                 'pass_rate': float(pass_rate),
+                 'timeout': self.timeout,
+                 'debug': self.debug,
+                 'eval_results': serializable_eval_results,
+                 'final_metadata': serializable_final_metadata
+             }
+         except Exception as e:
+             score.value = {'pass': False}
+             score.explanation = f'Evaluation failed: {str(e)}'
+             score.metadata = {'error': str(e)}
+
+         score.main_score_name = 'pass'
+         return score
+
+     def aggregate_scores(self, sample_scores):
+         from evalscope.metrics.metric import PassAtK
+
+         # calculate pass@k here
+         agg_list = []
+         for metric in self.metric_list:
+             if metric.lower().startswith('pass@'):
+                 k = int(metric.split('@')[1])
+                 # Get the scores for this metric
+                 agg = PassAtK(k)
+                 agg_list.extend(agg(sample_scores))
+         return agg_list
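
The hunk above shows the shape of the 1.0 migration: the flat `@Benchmark.register(...)` decorator becomes `register_benchmark(BenchmarkMeta(...))`, and the old `load`/`gen_prompt`/`get_gold_answer`/`match` methods are replaced by per-sample hooks (`record_to_sample`, `sample_filter`, `extract_answer`, `match_score`, `aggregate_scores`). The sketch below distills that pattern into a minimal adapter using only the API surface visible in this diff; the benchmark name, dataset id, and record fields are hypothetical placeholders, not part of the release.

```python
# Minimal sketch of the 1.0-style adapter pattern, assuming the API shown in the
# diff above. Names marked "hypothetical" are illustrative, not shipped code.
from typing import Any, Dict

from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
from evalscope.api.dataset import Sample
from evalscope.api.evaluator import TaskState
from evalscope.api.metric import Score
from evalscope.api.registry import register_benchmark


@register_benchmark(
    BenchmarkMeta(
        name='my_qa_bench',              # hypothetical benchmark name
        dataset_id='org/my_qa_dataset',  # hypothetical dataset id
        metric_list=['acc'],
        eval_split='test',
    )
)
class MyQABenchAdapter(DefaultDataAdapter):

    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
        # Map one raw record into the framework's Sample object.
        return Sample(input=record['question'], target=record['answer'])

    def extract_answer(self, prediction: str, task_state: TaskState) -> str:
        # Post-process the raw model output before scoring.
        return prediction.strip()

    def match_score(self, original_prediction: str, filtered_prediction: str,
                    reference: str, task_state: TaskState) -> Score:
        # Per-sample scoring; the Score fields mirror the LiveCodeBench adapter above.
        score = Score(extracted_prediction=filtered_prediction, prediction=original_prediction)
        score.value = {'acc': float(filtered_prediction == reference)}
        score.main_score_name = 'acc'
        return score
```

Splitting per-sample scoring (`match_score`) from aggregation (`aggregate_scores`) mirrors how the adapter above computes Pass@K from the collected sample scores.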
evalscope/benchmarks/live_code_bench/load_utils.py

@@ -32,8 +32,8 @@ def transform(item):
          private_test_cases = json.loads(item['private_test_cases'])
      except Exception as e:  # noqa: F841
          private_test_cases = json.loads(
-             pickle.loads(zlib.decompress(base64.b64decode(private_test_cases.encode('utf-8'))  # type: ignore
-                                          )))  # type: ignore
+             pickle.loads(zlib.decompress(base64.b64decode(private_test_cases.encode('utf-8'))))
+         )

      # load metadata
      metadata = json.loads(item['metadata'])
@@ -47,25 +47,17 @@ def transform(item):
      return item


- def filter_date(dataset, start_date=None, end_date=None):
-     new_dataset = []
-
-     for item in dataset:
-         contest_date = datetime.fromisoformat(item['contest_date'])
-         if start_date is not None:
-             p_start_date = datetime.strptime(start_date, '%Y-%m-%d')
-             if p_start_date > contest_date:
-                 continue
+ def filter_date(contest_date, start_date=None, end_date=None) -> bool:

-         if end_date is not None:
-             p_end_date = datetime.strptime(end_date, '%Y-%m-%d')
-             if p_end_date < contest_date:
-                 continue
+     contest_date = datetime.fromisoformat(contest_date)
+     if start_date is not None:
+         p_start_date = datetime.strptime(start_date, '%Y-%m-%d')
+         if p_start_date > contest_date:
+             return False

-         new_dataset.append(item)
+     if end_date is not None:
+         p_end_date = datetime.strptime(end_date, '%Y-%m-%d')
+         if p_end_date < contest_date:
+             return False

-     if start_date or end_date:
-         logger.info(
-             f'Filtered dataset with start_date: {start_date}, end_date: {end_date}, remaining items: {len(new_dataset)}'
-         )
-     return new_dataset
+     return True
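
`filter_date` changes from a batch filter over the whole dataset into a per-item predicate, which the adapter's new `sample_filter` hook calls with `sample.metadata['contest_date']`. A small usage sketch, assuming the module path listed in the files table above; the dates are arbitrary examples:

```python
# Usage sketch of the predicate form of filter_date; the dates are invented.
from evalscope.benchmarks.live_code_bench.load_utils import filter_date

# Contest date inside the [start_date, end_date] window: keep the sample.
print(filter_date('2024-08-01T00:00:00', start_date='2024-07-01', end_date='2024-09-01'))  # True

# Contest date before the window starts: drop the sample.
print(filter_date('2024-05-01T00:00:00', start_date='2024-07-01'))  # False
```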
evalscope/benchmarks/live_code_bench/testing_util.py

@@ -4,18 +4,22 @@ import faulthandler
  import json
  import numpy as np
  import platform
+
  # to run the solution files we're using a timing based approach
  import signal
  import sys
  import time
+
  # used for debugging to time steps
  from datetime import datetime
  from decimal import Decimal
  from enum import Enum
  from functools import partial
  from io import StringIO
+
  # from pyext import RuntimeModule
  from types import ModuleType
+
  # used for testing the code that reads from input
  from unittest.mock import mock_open, patch

@@ -342,8 +346,8 @@ def grade_stdio(
              return all_results, WA_send_args

          for output_line_idx, (
-             stripped_prediction_line,
-             stripped_gt_out_line,
+                 stripped_prediction_line,
+                 stripped_gt_out_line,
          ) in enumerate(zip(stripped_prediction_lines, stripped_gt_out_lines)):
              WA_send_args['error_message'] = (
                  f'Wrong answer at {output_line_idx=}: {truncatefn(stripped_prediction_line)} != {truncatefn(stripped_gt_out_line)}'
evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py

@@ -1,82 +1,56 @@
  from typing import Any

- from evalscope.benchmarks import Benchmark, DataAdapter
- from evalscope.constants import EvalType, OutputType
- from evalscope.metrics import exact_match
- from evalscope.metrics.completion_parsers import ResponseParser
-
- SUBSET_LIST = ['default']
-
-
- @Benchmark.register(
-     name='maritime_bench',
-     pretty_name='MaritimeBench',
-     tags=['Maritime', 'MCQ', 'Knowledge'],
-     description=
-     'MaritimeBench is a benchmark for evaluating AI models on maritime-related multiple-choice questions. It consists of questions related to maritime knowledge, where the model must select the correct answer from given options.',  # noqa: E501
-     dataset_id='HiDolphin/MaritimeBench',
-     model_adapter=OutputType.GENERATION,
-     output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
-     subset_list=SUBSET_LIST,
-     metric_list=['AverageAccuracy'],
-     eval_split='test',
-     prompt_template=
-     '题目来自于{subset_name}请回答单选题。要求只输出选项,不输出解释,将选项放在<>里,直接输出答案。示例:\n\n题目:在船舶主推进动力装置中,传动轴系在运转中承受以下复杂的应力和负荷,但不包括______。\n选项:\nA. 电磁力\nB. 压拉应力\nC. 弯曲应力\nD. 扭应力\n答:<A> 当前题目\n {query}',  # noqa: E501
+ from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+
+ MARITIME_PROMPT_TEMPLATE = '请回答单选题。要求只输出选项,不输出解释,将选项放在[]里,直接输出答案。示例:\n\n题目:在船舶主推进动力装置中,传动轴系在运转中承受以下复杂的应力和负荷,但不包括______。\n选项:\nA. 电磁力\nB. 压拉应力\nC. 弯曲应力\nD. 扭应力\n答:[A]\n 当前题目\n {question}\n选项:\n{choices}'  # noqa: E501
+
+
+ @register_benchmark(
+     BenchmarkMeta(
+         name='maritime_bench',
+         pretty_name='MaritimeBench',
+         tags=[Tags.CHINESE, Tags.MULTIPLE_CHOICE, Tags.KNOWLEDGE],
+         description=
+         'MaritimeBench is a benchmark for evaluating AI models on maritime-related multiple-choice questions. It consists of questions related to maritime knowledge, where the model must select the correct answer from given options.',  # noqa: E501
+         dataset_id='HiDolphin/MaritimeBench',
+         metric_list=['acc'],
+         few_shot_num=0,
+         eval_split='test',
+         prompt_template=MARITIME_PROMPT_TEMPLATE,
+     )
  )
- class MaritimeBenchAdapter(DataAdapter):
+ class MaritimeBenchAdapter(MultiChoiceAdapter):

      def __init__(self, **kwargs):
          super().__init__(**kwargs)

-         self.choices = ['A', 'B', 'C', 'D']
-
-     def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs) -> Any:
-
-         prefix = ''
-         query = prefix + input_d['question'] + '\n'
-         available_choices = []
-         for option in self.choices:
-             if option in input_d and input_d[option]:
-                 query += option + ':' + input_d[option] + '\n'
-                 available_choices.append(option)
-
-         full_prompt = self.prompt_template.format(subset_name=subset_name, query=query)
-         return self.gen_prompt_data(full_prompt, choices=available_choices)
-
-     def get_gold_answer(self, input_d: dict) -> str:
-         """
-         Parse the raw input labels (gold).
-
-         Args:
-             input_d: input raw data. Depending on the dataset.
-
-         Returns:
-             The parsed input. e.g. gold answer ... Depending on the dataset.
-         """
-         return input_d['answer']
-
-     def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = EvalType.CHECKPOINT) -> str:
-         """
-         Parse the raw model prediction (pred).
-
-         Args:
-             pred: model prediction. Depending on the model.
-
-         Returns:
-             The parsed prediction. e.g. model answer... Depending on the model.
-         """
-
-         return ResponseParser.parse_bracketed_answer(result, options=self.choices)
-
-     def match(self, gold: Any, pred: Any) -> Any:
-         """
-         Match the gold answer with the predicted answer.
-
-         Args:
-             gold: The gold answer.
-             pred: The predicted answer.
-
-         Returns:
-             The result of the match.
-         """
-         return exact_match(gold=gold, pred=pred)
+         self.reformat_subset = True
+
+     def record_to_sample(self, record) -> Sample:
+         # Extract available choices from the record
+         choices = []
+         choice_letters = ['A', 'B', 'C', 'D']
+         for letter in choice_letters:
+             if letter in record and record[letter]:
+                 choices.append(record[letter])
+
+         return Sample(
+             input=record['question'],
+             choices=choices,
+             target=record['answer'],
+         )
+
+     def format_prompt_template(self, sample):
+         choices = '\n'.join([f'{chr(65 + i)}. {choice}' for i, choice in enumerate(sample.choices)])
+         return MARITIME_PROMPT_TEMPLATE.format(question=sample.input, choices=choices)
+
+     def extract_answer(self, prediction, task_state):
+         # use regex to extract the answer from the prediction
+         import re
+         match = re.search(r'\[([A-D])\]', prediction)
+         if match:
+             return match.group(1)
+         return ''
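
The rewritten adapter splits the old `gen_prompt` into two hooks: `record_to_sample` keeps the non-empty option texts, and `format_prompt_template` re-letters them when the prompt is rendered; the answer is now requested in square brackets instead of angle brackets. A sketch of that data flow, using an invented record:

```python
import re

# Invented MaritimeBench-style record; the field names follow the adapter above.
record = {'question': '……', 'A': '选项一', 'B': '选项二', 'C': '选项三', 'D': '', 'answer': 'B'}

# record_to_sample keeps only the non-empty options, in A-D order.
choices = [record[letter] for letter in ['A', 'B', 'C', 'D'] if record.get(letter)]

# format_prompt_template re-letters the surviving options for the prompt body.
choices_block = '\n'.join(f'{chr(65 + i)}. {c}' for i, c in enumerate(choices))
print(choices_block)  # A. 选项一 / B. 选项二 / C. 选项三

# extract_answer pulls the bracketed letter the new prompt asks the model to emit.
prediction = '答:[B]'
match = re.search(r'\[([A-D])\]', prediction)
print(match.group(1) if match else '')  # B
```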
evalscope/benchmarks/math_500/math_500_adapter.py

@@ -1,58 +1,51 @@
- from evalscope.benchmarks import Benchmark, DataAdapter
- from evalscope.metrics import extract_answer, math_equal, strip_answer_string
- from evalscope.utils.logger import get_logger
+ # Copyright (c) Alibaba, Inc. and its affiliates.
+
+ from typing import Any, Dict

- # flake8: noqa
+ from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.evaluator import TaskState
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+ from evalscope.utils.logger import get_logger

  logger = get_logger()


- @Benchmark.register(
-     name='math_500',
-     pretty_name='MATH-500',
-     tags=['Mathematics'],
-     description=
-     "MATH-500 is a benchmark for evaluating mathematical reasoning capabilities of AI models. It consists of 500 diverse math problems across five levels of difficulty, designed to test a model's ability to solve complex mathematical problems by generating step-by-step solutions and providing the correct final answer.",  # noqa: E501
-     dataset_id='AI-ModelScope/MATH-500',
-     subset_list=['Level 1', 'Level 2', 'Level 3', 'Level 4', 'Level 5'],
-     metric_list=['AveragePass@1'],
-     few_shot_num=0,
-     train_split=None,
-     eval_split='test',
-     prompt_template='{query}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
+ @register_benchmark(
+     BenchmarkMeta(
+         name='math_500',
+         pretty_name='MATH-500',
+         tags=[Tags.MATH, Tags.REASONING],
+         description=
+         "MATH-500 is a benchmark for evaluating mathematical reasoning capabilities of AI models. It consists of 500 diverse math problems across five levels of difficulty, designed to test a model's ability to solve complex mathematical problems by generating step-by-step solutions and providing the correct final answer.",  # noqa: E501
+         dataset_id='AI-ModelScope/MATH-500',
+         subset_list=['Level 1', 'Level 2', 'Level 3', 'Level 4', 'Level 5'],
+         metric_list=[{
+             'acc': {
+                 'numeric': True
+             }
+         }],
+         few_shot_num=0,
+         train_split=None,
+         eval_split='test',
+         prompt_template='{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
+     )
  )
- class Math500Adapter(DataAdapter):
+ class Math500Adapter(DefaultDataAdapter):

      def __init__(self, *args, **kwargs):
          super().__init__(*args, **kwargs)

-     def load(self, **kwargs):
-         # default load all levels
-         kwargs['subset_list'] = ['default']
-         data_dict = super().load(**kwargs)
-         return self.reformat_subset(data_dict, subset_key='level', format='Level {}')
-
-     def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
-         """
-         Generate the prompt for the model input.
-         """
-         problem = input_d['problem']
-         full_prompt = self.prompt_template.format(query=problem)
-
-         return self.gen_prompt_data(full_prompt)
-
-     def get_gold_answer(self, input_d: dict) -> str:
-         # Extract the gold answer from the input dict.
-         return strip_answer_string(input_d['answer'])
-
-     def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
-         """
-         Parse the model output to get the answer. Could be the best choice index.
-         """
-         # Note: Use same extraction method for both of checkpoint/service/custom
-         result = strip_answer_string(extract_answer(result))
-         return result
-
-     def match(self, gold: str, pred: str) -> float:
-         res = math_equal(pred, gold)
-         return 1.0 if res else 0.0
+         self.reformat_subset = True
+
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         return Sample(
+             input=record['problem'],
+             target=record['answer'],
+             subset_key=f"Level {record['level']}",
+             metadata={
+                 'question_id': record['unique_id'],
+                 'solution': record['solution'],
+             },
+         )
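
Subset routing changes as well: instead of overriding `load` and regrouping the dataset dict, the adapter sets `reformat_subset = True` and tags each sample with a `subset_key` that matches an entry of `subset_list`. A minimal sketch with a made-up MATH-500-style record:

```python
# Made-up MATH-500-style record; only the fields read by record_to_sample above.
record = {
    'problem': 'What is $1+1$?',
    'answer': '2',
    'level': 3,
    'unique_id': 'test/algebra/0.json',
    'solution': '$1+1=2$.',
}

# With reformat_subset=True, this key routes the sample into the matching
# subset_list entry ('Level 1' ... 'Level 5').
subset_key = f"Level {record['level']}"
print(subset_key)  # Level 3
```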
evalscope/benchmarks/math_vista/__init__.py

File without changes
evalscope/benchmarks/math_vista/math_vista_adapter.py

@@ -0,0 +1,129 @@
+ # flake8: noqa: E501
+ import re
+ from typing import Any, Dict
+
+ from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.evaluator import TaskState
+ from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
+ from evalscope.utils.io_utils import bytes_to_base64
+ from evalscope.utils.logger import get_logger
+ from evalscope.utils.multi_choices import MultipleChoiceTemplate, parse_answers, prompt
+
+ logger = get_logger()
+
+ SUBSET_LIST = ['default']
+
+ OPEN_PROMPT = """
+ Solve the following problem step by step. The last line of your response should be of the form "ANSWER: $ANSWER" (without quotes) where $ANSWER is the answer to the problem.
+
+ {question}
+
+ Remember to put your answer on its own line at the end in the form "ANSWER: $ANSWER" (without quotes) where $ANSWER is the answer to the problem, and you do not need to use a \\boxed command.
+ """
+
+ MULT_CHOICE_PROMPT = MultipleChoiceTemplate.SINGLE_ANSWER_COT
+
+ MULTI_CHOICE_TYPE = 'multi_choice'
+ OPEN_TYPE = 'free_form'
+
+
+ @register_benchmark(
+     BenchmarkMeta(
+         name='math_vista',
+         pretty_name='MathVista',
+         dataset_id='evalscope/MathVista',
+         tags=[Tags.MATH, Tags.REASONING, Tags.MULTIPLE_CHOICE, Tags.MULTI_MODAL],
+         description=
+         'MathVista is a consolidated Mathematical reasoning benchmark within Visual contexts. It consists of three newly created datasets, IQTest, FunctionQA, and PaperQA, which address the missing visual domains and are tailored to evaluate logical reasoning on puzzle test figures, algebraic reasoning over functional plots, and scientific reasoning with academic paper figures, respectively. It also incorporates 9 MathQA datasets and 19 VQA datasets from the literature, which significantly enrich the diversity and complexity of visual perception and mathematical reasoning challenges within our benchmark. In total, MathVista includes 6,141 examples collected from 31 different datasets.',
+         subset_list=SUBSET_LIST,
+         metric_list=['acc'],
+         eval_split='testmini',
+         prompt_template=OPEN_PROMPT,
+     )
+ )
+ class MathVistaAdapter(VisionLanguageAdapter):
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         content_list, answers_list = MathVistaAdapter.create_content_and_answers_list(record)
+
+         if record['question_type'] == 'multi_choice':
+             label_answer = self.get_option_label(answers_list, record['answer'])
+             return Sample(
+                 input=[ChatMessageUser(content=content_list)],
+                 choices=answers_list,
+                 target=label_answer,
+                 metadata={
+                     'question_type': record['question_type'],
+                     'answer_type': record['answer_type'],
+                     **record['metadata'],
+                 }
+             )
+         elif record['question_type'] == 'free_form':
+             return Sample(
+                 input=[ChatMessageUser(content=content_list)],
+                 target=record['answer'],
+                 metadata={
+                     'precision': record['precision'],
+                     'question_type': record['question_type'],
+                     'answer_type': record['answer_type'],
+                     **record['metadata'],
+                 }
+             )
+         else:
+             raise ValueError(f"Unexpected question_type: {record['question_type']}")
+
+     def get_option_label(self, options, value):
+         try:
+             index = options.index(value)
+             return chr(ord('A') + index)
+         except ValueError:
+             logger.warning(f"Answer '{value}' not found in options: {options}. This may cause evaluation issues.")
+             return value
+
+     def extract_answer(self, prediction: str, task_state: TaskState) -> str:
+         question_type = task_state.metadata['question_type']
+         if question_type == MULTI_CHOICE_TYPE:
+             answers = parse_answers(task_state)
+             return ''.join(sorted(list(answers)))
+         elif question_type == OPEN_TYPE:
+             pattern = r'ANSWER:\s*(.*)'
+             match = re.search(pattern, prediction)
+             if match:
+                 return match.group(1).strip()
+             return ''
+         else:
+             raise ValueError(f'Unsupported question type: {question_type}')
+
+     @staticmethod
+     def create_content_and_answers_list(record: dict[str, Any], ) -> tuple[list[Content], list[str]]:
+         """
+         Create a list of content elements and a list of answers from a record.
+
+         Args:
+             record (dict): The record containing question, images, and options.
+
+
+         Returns:
+             tuple: A tuple containing:
+                 - content_list (list): A list of content elements (text and images).
+                 - answers_list (list): A list of possible answers (for multiple-choice questions).
+         """
+         question_type = record['question_type']
+         if question_type == MULTI_CHOICE_TYPE:
+             answers_list = record['choices']
+             input_text = prompt(question=record['question'], choices=answers_list, template=MULT_CHOICE_PROMPT)
+             content_list: list[Content] = [ContentText(text=input_text)]
+         else:
+             answers_list: list[str] = []
+             content_list: list[Content] = [ContentText(text=OPEN_PROMPT.format(question=record['question']))]
+         image = record['decoded_image']
+         if image:
+             image_base64 = bytes_to_base64(image['bytes'], format='jpg', add_header=True)
+             content_list.append(ContentImage(image=image_base64))
+         return content_list, answers_list
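
For free-form MathVista items, scoring depends on the `ANSWER:` convention that `OPEN_PROMPT` imposes and `extract_answer` parses; multiple-choice items take the other branch and go through `parse_answers` from `evalscope.utils.multi_choices`. A short sketch of the free-form extraction step, with an invented model output:

```python
import re

# Invented model output; the 'ANSWER:' convention comes from OPEN_PROMPT above.
prediction = 'The slope is (4 - 2) / (3 - 1) = 1.\nANSWER: 1'

match = re.search(r'ANSWER:\s*(.*)', prediction)
print(match.group(1).strip() if match else '')  # 1
```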