evalscope 0.17.1__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of evalscope might be problematic.

Files changed (302)
  1. evalscope/__init__.py +4 -1
  2. evalscope/api/benchmark/__init__.py +3 -0
  3. evalscope/api/benchmark/adapters/__init__.py +5 -0
  4. evalscope/api/benchmark/adapters/default_data_adapter.py +684 -0
  5. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  6. evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
  7. evalscope/api/benchmark/adapters/text2image_adapter.py +156 -0
  8. evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
  9. evalscope/api/benchmark/benchmark.py +356 -0
  10. evalscope/api/benchmark/meta.py +121 -0
  11. evalscope/api/dataset/__init__.py +2 -0
  12. evalscope/api/dataset/dataset.py +349 -0
  13. evalscope/api/dataset/loader.py +262 -0
  14. evalscope/api/dataset/utils.py +143 -0
  15. evalscope/api/evaluator/__init__.py +3 -0
  16. evalscope/api/evaluator/cache.py +378 -0
  17. evalscope/api/evaluator/evaluator.py +56 -0
  18. evalscope/api/evaluator/state.py +275 -0
  19. evalscope/api/filter/__init__.py +1 -0
  20. evalscope/api/filter/filter.py +72 -0
  21. evalscope/api/messages/__init__.py +12 -0
  22. evalscope/api/messages/chat_message.py +243 -0
  23. evalscope/api/messages/content.py +102 -0
  24. evalscope/api/messages/utils.py +35 -0
  25. evalscope/api/metric/__init__.py +2 -0
  26. evalscope/api/metric/metric.py +55 -0
  27. evalscope/api/metric/scorer.py +113 -0
  28. evalscope/api/mixin/__init__.py +1 -0
  29. evalscope/api/mixin/llm_judge_mixin.py +168 -0
  30. evalscope/api/model/__init__.py +12 -0
  31. evalscope/api/model/generate_config.py +155 -0
  32. evalscope/api/model/model.py +386 -0
  33. evalscope/api/model/model_output.py +285 -0
  34. evalscope/api/registry.py +182 -0
  35. evalscope/api/tool/__init__.py +3 -0
  36. evalscope/api/tool/tool_call.py +101 -0
  37. evalscope/api/tool/tool_info.py +173 -0
  38. evalscope/api/tool/utils.py +64 -0
  39. evalscope/app/app.py +3 -0
  40. evalscope/app/ui/app_ui.py +2 -1
  41. evalscope/app/ui/multi_model.py +50 -25
  42. evalscope/app/ui/single_model.py +26 -14
  43. evalscope/app/utils/data_utils.py +43 -27
  44. evalscope/app/utils/env_utils.py +12 -0
  45. evalscope/app/utils/text_utils.py +14 -14
  46. evalscope/app/utils/visualization.py +9 -4
  47. evalscope/arguments.py +7 -10
  48. evalscope/backend/opencompass/api_meta_template.py +2 -1
  49. evalscope/backend/opencompass/backend_manager.py +6 -5
  50. evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
  51. evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
  52. evalscope/backend/rag_eval/ragas/task_template.py +2 -1
  53. evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
  54. evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
  55. evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
  56. evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
  57. evalscope/backend/rag_eval/utils/embedding.py +10 -1
  58. evalscope/backend/rag_eval/utils/llm.py +13 -12
  59. evalscope/benchmarks/__init__.py +0 -2
  60. evalscope/benchmarks/aime/aime24_adapter.py +38 -40
  61. evalscope/benchmarks/aime/aime25_adapter.py +34 -40
  62. evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +86 -60
  63. evalscope/benchmarks/arc/arc_adapter.py +34 -147
  64. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70
  65. evalscope/benchmarks/arena_hard/utils.py +37 -1
  66. evalscope/benchmarks/bbh/bbh_adapter.py +72 -144
  67. evalscope/benchmarks/bfcl/bfcl_adapter.py +188 -171
  68. evalscope/benchmarks/bfcl/generation.py +222 -0
  69. evalscope/benchmarks/ceval/ceval_adapter.py +93 -162
  70. evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
  71. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -125
  72. evalscope/benchmarks/competition_math/competition_math_adapter.py +56 -108
  73. evalscope/benchmarks/data_collection/data_collection_adapter.py +187 -45
  74. evalscope/benchmarks/docmath/docmath_adapter.py +109 -51
  75. evalscope/benchmarks/docmath/utils.py +4 -5
  76. evalscope/benchmarks/drop/drop_adapter.py +88 -40
  77. evalscope/benchmarks/frames/frames_adapter.py +136 -52
  78. evalscope/benchmarks/general_arena/general_arena_adapter.py +140 -98
  79. evalscope/benchmarks/general_arena/utils.py +23 -27
  80. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +40 -101
  81. evalscope/benchmarks/general_qa/general_qa_adapter.py +73 -134
  82. evalscope/benchmarks/gpqa/gpqa_adapter.py +61 -100
  83. evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py} +12 -5
  84. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +62 -142
  85. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +35 -124
  86. evalscope/benchmarks/hle/hle_adapter.py +127 -93
  87. evalscope/benchmarks/humaneval/humaneval_adapter.py +86 -55
  88. evalscope/benchmarks/ifeval/ifeval_adapter.py +69 -40
  89. evalscope/benchmarks/ifeval/instructions.py +109 -64
  90. evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
  91. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  92. evalscope/benchmarks/ifeval/utils.py +6 -7
  93. evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
  94. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  95. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  96. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  97. evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -65
  98. evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
  99. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +121 -71
  100. evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
  101. evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
  102. evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +49 -75
  103. evalscope/benchmarks/math_500/math_500_adapter.py +41 -48
  104. evalscope/benchmarks/math_vista/__init__.py +0 -0
  105. evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
  106. evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -205
  107. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +80 -99
  108. evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +64 -110
  109. evalscope/benchmarks/mmmu/__init__.py +0 -0
  110. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  111. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  112. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
  113. evalscope/benchmarks/musr/musr_adapter.py +33 -64
  114. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +196 -152
  115. evalscope/benchmarks/process_bench/process_bench_adapter.py +144 -76
  116. evalscope/benchmarks/race/race_adapter.py +33 -119
  117. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
  118. evalscope/benchmarks/super_gpqa/{five_shot_prompt.txt → prompt.py} +14 -16
  119. evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +73 -117
  120. evalscope/benchmarks/super_gpqa/utils.py +2 -1
  121. evalscope/benchmarks/tau_bench/generation.py +147 -0
  122. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +114 -60
  123. evalscope/benchmarks/text2image/__init__.py +0 -0
  124. evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
  125. evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
  126. evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
  127. evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
  128. evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
  129. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +91 -70
  130. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -124
  131. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -266
  132. evalscope/benchmarks/winogrande/winogrande_adapter.py +28 -54
  133. evalscope/cli/cli.py +2 -0
  134. evalscope/cli/start_app.py +7 -1
  135. evalscope/cli/start_perf.py +7 -1
  136. evalscope/cli/start_server.py +6 -3
  137. evalscope/collections/__init__.py +2 -10
  138. evalscope/collections/sampler.py +10 -10
  139. evalscope/collections/schema.py +13 -11
  140. evalscope/config.py +157 -57
  141. evalscope/constants.py +37 -61
  142. evalscope/evaluator/__init__.py +1 -1
  143. evalscope/evaluator/evaluator.py +275 -419
  144. evalscope/filters/__init__.py +2 -0
  145. evalscope/filters/extraction.py +126 -0
  146. evalscope/filters/selection.py +57 -0
  147. evalscope/metrics/__init__.py +13 -13
  148. evalscope/metrics/llm_judge.py +47 -33
  149. evalscope/metrics/math_parser.py +27 -22
  150. evalscope/metrics/metric.py +307 -0
  151. evalscope/metrics/metrics.py +22 -18
  152. evalscope/metrics/t2v_metrics/__init__.py +0 -52
  153. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
  154. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
  155. evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
  156. evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
  157. evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
  158. evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
  159. evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
  160. evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
  161. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
  162. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
  163. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
  164. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
  165. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
  166. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
  167. evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
  168. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
  169. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
  170. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
  171. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
  172. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
  173. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
  174. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
  175. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
  176. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
  177. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
  178. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
  179. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
  180. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
  181. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
  182. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
  183. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
  184. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
  185. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
  186. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
  187. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
  188. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
  189. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
  190. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
  191. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
  192. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
  193. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
  194. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
  195. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
  196. evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
  197. evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
  198. evalscope/models/__init__.py +6 -29
  199. evalscope/models/image_edit_model.py +125 -0
  200. evalscope/models/mockllm.py +65 -0
  201. evalscope/models/model_apis.py +67 -0
  202. evalscope/models/modelscope.py +455 -0
  203. evalscope/models/openai_compatible.py +126 -0
  204. evalscope/models/text2image_model.py +124 -0
  205. evalscope/models/utils/openai.py +701 -0
  206. evalscope/perf/benchmark.py +4 -1
  207. evalscope/perf/http_client.py +4 -2
  208. evalscope/perf/plugin/api/custom_api.py +5 -4
  209. evalscope/perf/plugin/api/openai_api.py +11 -9
  210. evalscope/perf/plugin/datasets/custom.py +2 -1
  211. evalscope/perf/plugin/datasets/flickr8k.py +1 -1
  212. evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
  213. evalscope/perf/plugin/datasets/line_by_line.py +2 -1
  214. evalscope/perf/plugin/datasets/longalpaca.py +2 -1
  215. evalscope/perf/plugin/datasets/openqa.py +4 -2
  216. evalscope/perf/utils/benchmark_util.py +15 -10
  217. evalscope/perf/utils/db_util.py +9 -6
  218. evalscope/perf/utils/local_server.py +11 -3
  219. evalscope/perf/utils/rich_display.py +16 -10
  220. evalscope/report/__init__.py +2 -3
  221. evalscope/report/combinator.py +18 -12
  222. evalscope/report/generator.py +51 -35
  223. evalscope/report/{utils.py → report.py} +8 -6
  224. evalscope/run.py +33 -47
  225. evalscope/summarizer.py +1 -1
  226. evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  227. evalscope/utils/__init__.py +21 -2
  228. evalscope/utils/chat_service.py +3 -2
  229. evalscope/utils/deprecation_utils.py +12 -1
  230. evalscope/utils/function_utils.py +29 -0
  231. evalscope/utils/import_utils.py +23 -1
  232. evalscope/utils/io_utils.py +142 -6
  233. evalscope/utils/json_schema.py +208 -0
  234. evalscope/utils/logger.py +51 -12
  235. evalscope/utils/model_utils.py +11 -7
  236. evalscope/utils/multi_choices.py +288 -0
  237. evalscope/utils/url_utils.py +65 -0
  238. evalscope/version.py +2 -2
  239. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/METADATA +108 -62
  240. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/RECORD +258 -226
  241. tests/benchmark/test_eval.py +385 -0
  242. tests/benchmark/test_image_edit.py +65 -0
  243. tests/{aigc → benchmark}/test_t2i.py +22 -4
  244. tests/benchmark/test_vlm.py +80 -0
  245. tests/cli/test_all.py +85 -47
  246. tests/cli/test_collection.py +20 -8
  247. tests/cli/test_custom.py +22 -15
  248. tests/cli/test_reasoning.py +81 -0
  249. tests/common.py +73 -0
  250. tests/perf/test_perf.py +4 -2
  251. tests/rag/test_clip_benchmark.py +0 -2
  252. evalscope/benchmarks/aigc/t2i/base.py +0 -56
  253. evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +0 -78
  254. evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +0 -58
  255. evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +0 -58
  256. evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +0 -57
  257. evalscope/benchmarks/aigc/t2i/tifa_adapter.py +0 -37
  258. evalscope/benchmarks/arc/ai2_arc.py +0 -151
  259. evalscope/benchmarks/benchmark.py +0 -81
  260. evalscope/benchmarks/ceval/ceval_exam.py +0 -146
  261. evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
  262. evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
  263. evalscope/benchmarks/competition_math/competition_math.py +0 -79
  264. evalscope/benchmarks/data_adapter.py +0 -528
  265. evalscope/benchmarks/filters.py +0 -59
  266. evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
  267. evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
  268. evalscope/benchmarks/humaneval/humaneval.py +0 -79
  269. evalscope/benchmarks/mmlu/mmlu.py +0 -160
  270. evalscope/benchmarks/mmlu/samples.jsonl +0 -5
  271. evalscope/benchmarks/process_bench/critique_template.txt +0 -13
  272. evalscope/benchmarks/race/race.py +0 -104
  273. evalscope/benchmarks/race/samples.jsonl +0 -5
  274. evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
  275. evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
  276. evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
  277. evalscope/benchmarks/utils.py +0 -60
  278. evalscope/collections/evaluator.py +0 -375
  279. evalscope/metrics/completion_parsers.py +0 -227
  280. evalscope/metrics/named_metrics.py +0 -55
  281. evalscope/models/adapters/__init__.py +0 -14
  282. evalscope/models/adapters/base_adapter.py +0 -84
  283. evalscope/models/adapters/bfcl_adapter.py +0 -246
  284. evalscope/models/adapters/chat_adapter.py +0 -207
  285. evalscope/models/adapters/choice_adapter.py +0 -222
  286. evalscope/models/adapters/custom_adapter.py +0 -71
  287. evalscope/models/adapters/server_adapter.py +0 -236
  288. evalscope/models/adapters/t2i_adapter.py +0 -79
  289. evalscope/models/adapters/tau_bench_adapter.py +0 -189
  290. evalscope/models/custom/__init__.py +0 -4
  291. evalscope/models/custom/custom_model.py +0 -50
  292. evalscope/models/custom/dummy_model.py +0 -99
  293. evalscope/models/local_model.py +0 -128
  294. evalscope/models/register.py +0 -41
  295. tests/cli/test_run.py +0 -489
  296. /evalscope/{benchmarks/aigc → api}/__init__.py +0 -0
  297. /evalscope/benchmarks/{aigc/t2i → image_edit}/__init__.py +0 -0
  298. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/LICENSE +0 -0
  299. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/WHEEL +0 -0
  300. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/entry_points.txt +0 -0
  301. {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/top_level.txt +0 -0
  302. /tests/{aigc → benchmark}/__init__.py +0 -0
evalscope/benchmarks/arc/arc_adapter.py +34 -147

@@ -1,159 +1,46 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
-import json
-import os
-
-from evalscope.benchmarks import Benchmark, DataAdapter
-from evalscope.constants import EvalType, OutputType
-from evalscope.metrics import exact_match
-from evalscope.metrics.completion_parsers import ResponseParser
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
-
-# flake8: noqa
+from evalscope.utils.multi_choices import MultipleChoiceTemplate
 
 logger = get_logger()
 
 
-@Benchmark.register(
-    name='arc',
-    pretty_name='ARC',
-    tags=['Reasoning', 'MCQ'],
-    description=
-    'The ARC (AI2 Reasoning Challenge) benchmark is designed to evaluate the reasoning capabilities of AI models through multiple-choice questions derived from science exams. It includes two subsets: ARC-Easy and ARC-Challenge, which vary in difficulty.',  # noqa: E501
-    dataset_id='modelscope/ai2_arc',
-    model_adapter=OutputType.GENERATION,
-    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
-    subset_list=['ARC-Easy', 'ARC-Challenge'],
-    metric_list=['AverageAccuracy'],
-    few_shot_num=0,
-    train_split='train',
-    eval_split='test',
-    prompt_template=
-    'Given the following question and four candidate answers (A, B, C and D), choose the best answer.\n{query}\nYour response should end with "The best answer is [the_answer_letter]" where the [the_answer_letter] is one of A, B, C or D.',  # noqa
+@register_benchmark(
+    BenchmarkMeta(
+        name='arc',
+        pretty_name='ARC',
+        tags=[Tags.REASONING, Tags.MULTIPLE_CHOICE],
+        description=
+        'The ARC (AI2 Reasoning Challenge) benchmark is designed to evaluate the reasoning capabilities of AI models through multiple-choice questions derived from science exams. It includes two subsets: ARC-Easy and ARC-Challenge, which vary in difficulty.',  # noqa: E501
+        dataset_id='allenai/ai2_arc',
+        subset_list=['ARC-Easy', 'ARC-Challenge'],
+        metric_list=['acc'],
+        few_shot_num=0,
+        train_split='train',
+        eval_split='test',
+        prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER,
+    )
 )
-class ARCAdapter(DataAdapter):
+class ARCAdapter(MultiChoiceAdapter):
 
     def __init__(self, **kwargs):
-        few_shot_num = kwargs.get('few_shot_num', None)
-        if few_shot_num is None:
-            # Use 0-shot by default
-            logger.info(f'Set 0-shot examples by system for ARC.')
-            few_shot_num = 0
-
-        if few_shot_num != 0:
-            logger.warning(f'few_shot_num is recommended to set 0 for ARC, got {few_shot_num}.')
-
         super().__init__(**kwargs)
 
-        self.choices = ['A', 'B', 'C', 'D']
-
-    def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
-        """
-        Load the dataset from local disk.
-
-        dataset_name_or_path: str, the dataset id or path. e.g. 'arc'
-        subset_list: list, the subset list to load. e.g. ['ARC-Easy', 'ARC-Challenge']
-        work_dir: str, the local root data directory. e.g. '/path/to/data'
-        kwargs: dict, other arguments.
-        """
-        data_dict = {}
-        for subset_name in subset_list:
-            if os.path.exists(dataset_name_or_path):
-                subset_path = os.path.join(dataset_name_or_path, subset_name)
-            else:
-                subset_path = os.path.join(work_dir, dataset_name_or_path, subset_name)
-            for split_name in ['Train', 'Test']:
-                split_path = os.path.join(subset_path, f'{subset_name}-{split_name}.jsonl')
-                if os.path.exists(split_path):
-                    with open(split_path, 'r', errors='ignore', encoding='utf-8') as in_f:
-                        rows = []
-                        for line in in_f:
-                            item = json.loads(line.strip())
-                            raw_choices = item['question']['choices']
-                            rows.append({
-                                'id': item['id'],
-                                'question': item['question']['stem'],
-                                'choices': {
-                                    'text': [d['text'] for d in raw_choices],
-                                    'label': [d['label'] for d in raw_choices]
-                                },
-                                'answerKey': item['answerKey'],
-                            })
-
-                        if subset_name in data_dict:
-                            data_dict[subset_name].update({split_name.lower(): rows})
-                        else:
-                            data_dict[subset_name] = {split_name.lower(): rows}
-
-        return data_dict
-
-    def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
-        """
-        Generate model prompt from raw data, unify the prompt format for ARC benchmark.
-
-        Args:
-            input_d (dict): The raw input. A single data format of the ARC:
-
-            {
-                'id': 'Mercury_7220990',
-                'question': 'Which factor will most likely cause a person to develop a fever?',
-                'choices':
-                {
-                    'text': ['a leg muscle relaxing after exercise',
-                             'a bacterial population in the bloodstream',
-                             'several viral particles on the skin',
-                             'carbohydrates being digested in the stomach'],
-                    'label': ['A', 'B', 'C', 'D']
-                },
-                'answerKey': 'B'
-            }
-
-        Returns:
-            {'data': ['xxx'], 'multi_choices': ['A', 'B', 'C', 'D']}
-        """
-        few_shot_prompts = [self._generate_prompt(input_d=sample, include_answer=True) for sample in few_shot_list]
-        context = '\n'.join(few_shot_prompts) + self._generate_prompt(input_d=input_d, include_answer=False)
-
-        full_prompt = self.prompt_template.format(query=context)
-
-        return self.gen_prompt_data(full_prompt)
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        # Get the gold choice
-        return input_d.get('answerKey', '')
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = EvalType.CHECKPOINT) -> str:
-        """
-        Parse the model output to get the answer. Could be the best choice index.
-
-        Args:
-            result: Predicted answer from the model. Usually a string for chat.
-            raw_input_d (dict): The raw input. Depending on the dataset.
-            eval_type: 'checkpoint' or 'service' or `custom`, default: 'checkpoint'
-
-        Returns:
-            The parsed answer. Depending on the dataset. Usually a string for chat.
-        """
-        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
-            return result
-        else:
-            return ResponseParser.parse_first_option(text=result, options=self.choices)
-
-    def match(self, gold: str, pred: str) -> float:
-        return exact_match(gold=gold, pred=pred)
-
-    @classmethod
-    def _generate_prompt(cls, input_d: dict, include_answer=True) -> str:
-
-        example: str = input_d['question']
-
-        choices_texts: list = input_d['choices']['text']
-        choices_labels: list = input_d['choices']['label']
-        choices_prompts: str = '\n'.join([label + '. ' + text for text, label in zip(choices_texts, choices_labels)])
-        example += '\n' + choices_prompts
-
-        if include_answer:
-            example += '\nAnswer:'
-            example += ' {}\n\n'.format(input_d['answerKey'])
-
-        return example
+    def record_to_sample(self, record) -> Sample:
+        # Convert choice labels to indices (A->0, B->1, etc.)
+        choice_texts = record['choices']['text']
+        answer_key = record['answerKey']
+
+        return Sample(
+            input=record['question'],
+            choices=choice_texts,
+            target=answer_key,
+            metadata={
+                'id': record.get('id', ''),
+            },
+        )
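
For readers migrating their own adapters: in 1.0.x a benchmark is registered via register_benchmark(BenchmarkMeta(...)) and mainly needs to map raw records to Sample objects. The snippet below is a minimal, self-contained sketch of the mapping that the new record_to_sample performs, reusing the ARC record from the docstring removed in this diff; SampleSketch and the standalone record_to_sample here are illustrative stand-ins, not the real evalscope.api.dataset.Sample or adapter method.

from typing import Any, Dict, List, NamedTuple


class SampleSketch(NamedTuple):
    """Simplified stand-in for evalscope.api.dataset.Sample (illustration only)."""
    input: str
    choices: List[str]
    target: str
    metadata: Dict[str, Any]


def record_to_sample(record: Dict[str, Any]) -> SampleSketch:
    """Mirror of the mapping the new ARCAdapter.record_to_sample performs."""
    return SampleSketch(
        input=record['question'],
        choices=record['choices']['text'],
        target=record['answerKey'],
        metadata={'id': record.get('id', '')},
    )


# Example ARC record, taken from the docstring removed in this diff.
record = {
    'id': 'Mercury_7220990',
    'question': 'Which factor will most likely cause a person to develop a fever?',
    'choices': {
        'text': [
            'a leg muscle relaxing after exercise',
            'a bacterial population in the bloodstream',
            'several viral particles on the skin',
            'carbohydrates being digested in the stomach',
        ],
        'label': ['A', 'B', 'C', 'D'],
    },
    'answerKey': 'B',
}

print(record_to_sample(record).target)  # -> 'B'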
evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70

@@ -1,75 +1,97 @@
-from typing import Any, List
-
-from evalscope.benchmarks import Benchmark, DataAdapter
-from evalscope.metrics import LLMJudge, Metric, mean, metric_registry
+# flake8: noqa: E501
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.metric import AggScore, SampleScore, Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
 
-# flake8: noqa
-
 logger = get_logger()
 
-GRADER_SYSTEM_PROMPT = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"."  # noqa: E501
-
-GRADER_TEMPLATE = "<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>".strip(
-)  # noqa: E501
-
-
-@Benchmark.register(
-    name='arena_hard',
-    pretty_name='ArenaHard',
-    tags=['Instruction-Following', 'Arena'],
-    description=
-    'ArenaHard is a benchmark designed to evaluate the performance of large language models in a competitive setting, '
-    'where models are pitted against each other in a series of tasks to determine their relative strengths and weaknesses. '
-    'It includes a set of challenging tasks that require reasoning, understanding, and generation capabilities. '
-    'Currently not support `style-controlled winrate`; the official Judge model is `gpt-4-1106-preview`, while the baseline model is `gpt-4-0314`.',  # noqa: E501
-    dataset_id='AI-ModelScope/arena-hard-auto-v0.1',
-    metric_list=['winrate'],
-    few_shot_num=0,
-    train_split=None,
-    eval_split='test')
-class ArenaHardAdapter(DataAdapter):
+GRADER_SYSTEM_PROMPT = """Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"."""  # noqa: E501
+
+GRADER_TEMPLATE = """<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>""".strip(
+)
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='arena_hard',
+        pretty_name='ArenaHard',
+        tags=[Tags.INSTRUCTION_FOLLOWING, Tags.ARENA],
+        description=
+        'ArenaHard is a benchmark designed to evaluate the performance of large language models in a competitive setting, '
+        'where models are pitted against each other in a series of tasks to determine their relative strengths and weaknesses. '
+        'It includes a set of challenging tasks that require reasoning, understanding, and generation capabilities. '
+        'Currently not support `style-controlled winrate`; the official Judge model is `gpt-4-1106-preview`, while the baseline model is `gpt-4-0314`.',
+        dataset_id='AI-ModelScope/arena-hard-auto-v0.1',
+        metric_list=['winrate'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='test',
+        prompt_template='{question}'
+    )
+)
+class ArenaHardAdapter(DefaultDataAdapter):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-        # register metrics
-        metric_registry.register(Metric(name='winrate', object=mean))
-
-        # whether to use LLM as a judge
-        self.llm_as_a_judge = True
+        self._use_llm_judge = True  # Use LLM as a judge by default
 
-    def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs) -> dict:
-        question = input_d['question']
-        return self.gen_prompt_data(question)
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        return input_d['prediction']
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, **kwargs) -> str:
-        return result.strip()
-
-    def match(self, gold: str, pred: str):
-        # simple match
-        logger.warning(f'Please use LLMJudge to match the result for {self.name}')
-        return None
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        """
+        Convert a data record to a Sample object.
 
-    def llm_match(self, gold: Any, pred: Any, judge: LLMJudge, **kwargs) -> dict:
-        from .utils import post_process_arenahard
+        Args:
+            record (Dict[str, Any]): Input data record.
 
-        raw_input = kwargs.get('raw_input', None)
-        question = raw_input['question']
-        # gold is baseline answer 'A', pred is model answer 'B'
-        prompt1 = GRADER_TEMPLATE.format(question=question, answer_1=gold, answer_2=pred)
+        Returns:
+            Sample: Sample object with input, target, and metadata.
+        """
+        question = record['question']
+        baseline_prediction = record['prediction']  # baseline model prediction
+
+        return Sample(
+            input=question, target=baseline_prediction, metadata={'capability': record.get('capability', 'unknown')}
+        )
+
+    def llm_match_score(
+        self,
+        original_prediction: str,
+        filtered_prediction: str,
+        reference: str,
+        task_state: TaskState,
+    ) -> Score:
+        from .utils import get_judge_score, post_process_arenahard
+
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+
+        question = task_state.input_text
+
+        # reference is baseline answer 'A', filtered_prediction is model answer 'B'
+        prompt1 = GRADER_TEMPLATE.format(question=question, answer_1=reference, answer_2=filtered_prediction)
         # reverse the order
-        prompt2 = GRADER_TEMPLATE.format(question=question, answer_1=pred, answer_2=gold)
+        prompt2 = GRADER_TEMPLATE.format(question=question, answer_1=filtered_prediction, answer_2=reference)
+
         # get grading response
-        game1_response = judge(prompt1, system_prompt=GRADER_SYSTEM_PROMPT)
-        game2_response = judge(prompt2, system_prompt=GRADER_SYSTEM_PROMPT)
+        game1_response = self.llm_judge.judge(prompt1, system_prompt=GRADER_SYSTEM_PROMPT)
+        game2_response = self.llm_judge.judge(prompt2, system_prompt=GRADER_SYSTEM_PROMPT)
+
         # parse grading response
         res1 = post_process_arenahard(game1_response)
         res2 = post_process_arenahard(game2_response)
-        return {
+
+        score1 = get_judge_score(res1, reverse=True)
+        score2 = get_judge_score(res2, reverse=False)
+
+        battle_result = {
             'model_a':
             'gpt4-0314',
             'model_b':
@@ -88,22 +110,26 @@ class ArenaHardAdapter(DataAdapter):
             ]
         }
 
-    def compute_metric(self, review_res_list: List[dict], **kwargs) -> List[dict]:
-        """
-        compute score of the model
-        """
+        # Set score based on the battle result
+        score.value = {'score': (score1 + score2) / 2}
+        score.explanation = f'LLM judge battles: Game1: {game1_response[:100]}... Game2: {game2_response[:100]}...'
+        score.metadata = {
+            'source': 'llm_judge',
+            'judge_strategy': self.judge_strategy,
+            'model': self.llm_judge.model_id,
+            'battle_result': battle_result
+        }
+        return score
+
+    def aggregate_scores(self, sample_scores: List[SampleScore]) -> List[AggScore]:
        import pandas as pd
 
        from .utils import compute_mle_elo, get_battles_from_row, get_bootstrap_result, get_win_rate_column
 
-        if isinstance(review_res_list[0], list):
-            review_res_list = [item for sublist in review_res_list for item in sublist]
-
-        battles = pd.concat([get_battles_from_row(res) for res in review_res_list])
+        battles = pd.concat([get_battles_from_row(res.score.metadata['battle_result']) for res in sample_scores])
 
        bootstrap_online_elo = compute_mle_elo(battles)
 
-        # bootstrap_elo_lu = get_bootstrap_result(battles, compute_mle_elo, 100)
        stats = pd.DataFrame()
        stats['results'] = None
        stats['results'] = stats['results'].astype('object')
@@ -112,11 +138,11 @@ class ArenaHardAdapter(DataAdapter):
            # assert model in bootstrap_elo_lu.columns
            stats.at[i, 'model'] = model
            stats.at[i, 'score'] = bootstrap_online_elo[model]
-            # stats.at[i, "lower"] = np.percentile(bootstrap_elo_lu[model], 2.5)
-            # stats.at[i, "upper"] = np.percentile(bootstrap_elo_lu[model], 97.5)
-
-        # stats['score'] = get_win_rate_column(stats, 'score', 'gpt4-0314').tolist()
 
        score = get_win_rate_column(stats, 'score', 'gpt4-0314').at['test_model']
 
-        return [{'metric_name': 'winrate', 'score': score, 'num': len(review_res_list)}]
+        return [AggScore(
+            score=score,
+            metric_name='winrate',
+            num=len(sample_scores),
+        )]
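
The reworked llm_match_score above judges every question twice, once with the baseline answer in position A and once with the positions swapped, then averages the two per-game scores. The sketch below is a minimal, self-contained restatement of that flow; the verdict-to-score tables mirror get_judge_score from utils.py (next diff), while two_game_score, stub_judge, and the shortened template are illustrative names rather than evalscope APIs.

from typing import Callable, Dict

# Verdict-to-score tables restated from get_judge_score (see utils.py below).
# 'Model as B' corresponds to reverse=True, 'model as A' to reverse=False.
SCORE_MODEL_AS_B: Dict[str, float] = {'A=B': 0.5, 'A>B': 0.25, 'A>>B': 0.0, 'B>A': 0.75, 'B>>A': 1.0}
SCORE_MODEL_AS_A: Dict[str, float] = {'A=B': 0.5, 'A>B': 0.75, 'A>>B': 1.0, 'B>A': 0.25, 'B>>A': 0.0}


def two_game_score(question: str, baseline_answer: str, model_answer: str,
                   judge: Callable[[str], str], template: str) -> float:
    """Judge twice with swapped answer order and average the two scores."""
    # Game 1: baseline answers as assistant A, the evaluated model as assistant B.
    verdict1 = judge(template.format(question=question, answer_1=baseline_answer, answer_2=model_answer))
    # Game 2: positions swapped to reduce position bias.
    verdict2 = judge(template.format(question=question, answer_1=model_answer, answer_2=baseline_answer))
    score1 = SCORE_MODEL_AS_B.get(verdict1, 0.5)
    score2 = SCORE_MODEL_AS_A.get(verdict2, 0.5)
    return (score1 + score2) / 2


def stub_judge(prompt: str) -> str:
    """Toy judge that always prefers assistant A slightly."""
    return 'A>B'


template = '<|User Prompt|>\n{question}\n\nA: {answer_1}\nB: {answer_2}'
print(two_game_score('What is 2 + 2?', 'It is 4.', 'Four.', stub_judge, template))  # 0.5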
evalscope/benchmarks/arena_hard/utils.py +37 -1

@@ -19,6 +19,41 @@ def post_process_arenahard(completion):
     return None
 
 
+def get_judge_score(result, reverse=False):
+    """
+    Calculate the judge score, considering confidence weight.
+
+    Args:
+        result: Judgment result ('A=B', 'A>B', 'A>>B', 'B>A', 'B>>A')
+        reverse: Whether to reverse the score
+
+    Returns:
+        float: Weighted score
+    """
+
+    # Base score mapping - using finer-grained scores
+    if not reverse:
+        score_mapping = {
+            'A=B': 0.5,  # Tie
+            'A>B': 0.75,  # A slightly wins
+            'A>>B': 1.0,  # A significantly wins
+            'B>A': 0.25,  # B slightly wins
+            'B>>A': 0.0,  # B significantly wins
+        }
+    else:
+        score_mapping = {
+            'A=B': 0.5,  # Tie
+            'A>B': 0.25,  # A slightly wins
+            'A>>B': 0.0,  # A significantly wins
+            'B>A': 0.75,  # B slightly wins
+            'B>>A': 1.0,  # B significantly wins
+        }
+
+    base_score = score_mapping.get(result, 0.5)
+
+    return base_score
+
+
 def get_battles_from_row(row, first_game_only=False, multiplier=3):
     results = []
     output = {'model_a': row['model_a'], 'model_b': row['model_b']}
@@ -106,7 +141,8 @@ def compute_mle_elo(df, SCALE=400, BASE=10, INIT_RATING=1000):
         return elo_scores.sort_values(ascending=False)
 
     lr = LogisticRegression(
-        fit_intercept=False, penalty=None, tol=1e-8)  # May need to set a small value when not use GPT4 as judge model
+        fit_intercept=False, penalty=None, tol=1e-8
+    )  # May need to set a small value when not use GPT4 as judge model
     lr.fit(X, Y)
 
     elo_scores = SCALE * lr.coef_[0] + INIT_RATING
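
As a quick sanity check of the new get_judge_score, the values below are read directly from the mapping tables in this diff; the import assumes evalscope 1.0.1 is installed so the function is available at its new location.

from evalscope.benchmarks.arena_hard.utils import get_judge_score

# Game 1: the evaluated model answered as assistant B, so reverse=True is used.
assert get_judge_score('B>A', reverse=True) == 0.75
# Game 2: positions swapped, the model answered as assistant A.
assert get_judge_score('A>B', reverse=False) == 0.75
# llm_match_score reports the average of the two games.
assert (0.75 + 0.75) / 2 == 0.75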