evalscope 0.17.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (273)
  1. evalscope/__init__.py +4 -1
  2. evalscope/api/__init__.py +0 -0
  3. evalscope/api/benchmark/__init__.py +3 -0
  4. evalscope/api/benchmark/adapters/__init__.py +3 -0
  5. evalscope/api/benchmark/adapters/default_data_adapter.py +683 -0
  6. evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
  7. evalscope/api/benchmark/adapters/text2image_adapter.py +155 -0
  8. evalscope/api/benchmark/benchmark.py +321 -0
  9. evalscope/api/benchmark/meta.py +115 -0
  10. evalscope/api/dataset/__init__.py +2 -0
  11. evalscope/api/dataset/dataset.py +349 -0
  12. evalscope/api/dataset/loader.py +261 -0
  13. evalscope/api/dataset/utils.py +143 -0
  14. evalscope/api/evaluator/__init__.py +3 -0
  15. evalscope/api/evaluator/cache.py +355 -0
  16. evalscope/api/evaluator/evaluator.py +56 -0
  17. evalscope/api/evaluator/state.py +264 -0
  18. evalscope/api/filter/__init__.py +1 -0
  19. evalscope/api/filter/filter.py +72 -0
  20. evalscope/api/messages/__init__.py +11 -0
  21. evalscope/api/messages/chat_message.py +198 -0
  22. evalscope/api/messages/content.py +102 -0
  23. evalscope/api/messages/utils.py +35 -0
  24. evalscope/api/metric/__init__.py +2 -0
  25. evalscope/api/metric/metric.py +55 -0
  26. evalscope/api/metric/scorer.py +105 -0
  27. evalscope/api/mixin/__init__.py +2 -0
  28. evalscope/api/mixin/dataset_mixin.py +105 -0
  29. evalscope/api/mixin/llm_judge_mixin.py +168 -0
  30. evalscope/api/model/__init__.py +12 -0
  31. evalscope/api/model/generate_config.py +157 -0
  32. evalscope/api/model/model.py +383 -0
  33. evalscope/api/model/model_output.py +285 -0
  34. evalscope/api/registry.py +182 -0
  35. evalscope/api/tool/__init__.py +3 -0
  36. evalscope/api/tool/tool_call.py +101 -0
  37. evalscope/api/tool/tool_info.py +173 -0
  38. evalscope/api/tool/utils.py +64 -0
  39. evalscope/app/ui/app_ui.py +2 -1
  40. evalscope/app/ui/multi_model.py +50 -25
  41. evalscope/app/ui/single_model.py +23 -11
  42. evalscope/app/utils/data_utils.py +42 -26
  43. evalscope/app/utils/text_utils.py +0 -2
  44. evalscope/app/utils/visualization.py +9 -4
  45. evalscope/arguments.py +6 -7
  46. evalscope/backend/opencompass/api_meta_template.py +2 -1
  47. evalscope/backend/opencompass/backend_manager.py +6 -3
  48. evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
  49. evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
  50. evalscope/backend/rag_eval/ragas/task_template.py +2 -1
  51. evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
  52. evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
  53. evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
  54. evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
  55. evalscope/backend/rag_eval/utils/embedding.py +2 -1
  56. evalscope/backend/rag_eval/utils/llm.py +13 -12
  57. evalscope/benchmarks/__init__.py +0 -2
  58. evalscope/benchmarks/aigc/i2i/__init__.py +0 -0
  59. evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +44 -0
  60. evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +53 -55
  61. evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +41 -46
  62. evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +29 -45
  63. evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +34 -44
  64. evalscope/benchmarks/aigc/t2i/tifa_adapter.py +16 -27
  65. evalscope/benchmarks/aime/aime24_adapter.py +38 -40
  66. evalscope/benchmarks/aime/aime25_adapter.py +34 -40
  67. evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +86 -60
  68. evalscope/benchmarks/arc/arc_adapter.py +34 -147
  69. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70
  70. evalscope/benchmarks/arena_hard/utils.py +37 -1
  71. evalscope/benchmarks/bbh/bbh_adapter.py +72 -144
  72. evalscope/benchmarks/bfcl/bfcl_adapter.py +181 -160
  73. evalscope/benchmarks/bfcl/generation.py +222 -0
  74. evalscope/benchmarks/ceval/ceval_adapter.py +94 -162
  75. evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
  76. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -125
  77. evalscope/benchmarks/competition_math/competition_math_adapter.py +56 -108
  78. evalscope/benchmarks/data_collection/data_collection_adapter.py +183 -45
  79. evalscope/benchmarks/docmath/docmath_adapter.py +109 -51
  80. evalscope/benchmarks/docmath/utils.py +4 -5
  81. evalscope/benchmarks/drop/drop_adapter.py +88 -40
  82. evalscope/benchmarks/frames/frames_adapter.py +135 -52
  83. evalscope/benchmarks/general_arena/general_arena_adapter.py +136 -98
  84. evalscope/benchmarks/general_arena/utils.py +23 -27
  85. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +40 -101
  86. evalscope/benchmarks/general_qa/general_qa_adapter.py +73 -134
  87. evalscope/benchmarks/gpqa/gpqa_adapter.py +61 -100
  88. evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py} +12 -5
  89. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +62 -142
  90. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +35 -124
  91. evalscope/benchmarks/hle/hle_adapter.py +127 -93
  92. evalscope/benchmarks/humaneval/humaneval_adapter.py +86 -55
  93. evalscope/benchmarks/ifeval/ifeval_adapter.py +69 -40
  94. evalscope/benchmarks/ifeval/instructions.py +109 -64
  95. evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
  96. evalscope/benchmarks/ifeval/utils.py +6 -7
  97. evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -65
  98. evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
  99. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +121 -71
  100. evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
  101. evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
  102. evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +49 -75
  103. evalscope/benchmarks/math_500/math_500_adapter.py +41 -48
  104. evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -205
  105. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +80 -99
  106. evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +64 -110
  107. evalscope/benchmarks/musr/musr_adapter.py +33 -64
  108. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +192 -152
  109. evalscope/benchmarks/process_bench/process_bench_adapter.py +144 -76
  110. evalscope/benchmarks/race/race_adapter.py +33 -119
  111. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
  112. evalscope/benchmarks/super_gpqa/{five_shot_prompt.txt → prompt.py} +14 -16
  113. evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +73 -117
  114. evalscope/benchmarks/super_gpqa/utils.py +2 -1
  115. evalscope/benchmarks/tau_bench/generation.py +147 -0
  116. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +112 -54
  117. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +91 -70
  118. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -124
  119. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -265
  120. evalscope/benchmarks/winogrande/winogrande_adapter.py +28 -54
  121. evalscope/cli/cli.py +2 -0
  122. evalscope/cli/start_server.py +6 -3
  123. evalscope/collections/__init__.py +2 -10
  124. evalscope/collections/sampler.py +10 -10
  125. evalscope/collections/schema.py +13 -11
  126. evalscope/config.py +95 -54
  127. evalscope/constants.py +29 -61
  128. evalscope/evaluator/__init__.py +1 -1
  129. evalscope/evaluator/evaluator.py +277 -423
  130. evalscope/filters/__init__.py +2 -0
  131. evalscope/filters/extraction.py +126 -0
  132. evalscope/filters/selection.py +57 -0
  133. evalscope/metrics/__init__.py +13 -13
  134. evalscope/metrics/llm_judge.py +32 -30
  135. evalscope/metrics/math_parser.py +27 -22
  136. evalscope/metrics/metric.py +307 -0
  137. evalscope/metrics/metrics.py +22 -18
  138. evalscope/metrics/t2v_metrics/__init__.py +0 -52
  139. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
  140. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
  141. evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
  142. evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
  143. evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
  144. evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
  145. evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
  146. evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
  147. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
  148. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
  149. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
  150. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
  151. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
  152. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
  153. evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
  154. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
  155. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
  156. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
  157. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
  158. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
  159. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
  160. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
  161. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
  162. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
  163. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
  164. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
  165. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
  166. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
  167. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
  168. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
  169. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
  170. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
  171. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
  172. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
  173. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
  174. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
  175. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
  176. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
  177. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
  178. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
  179. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
  180. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
  181. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
  182. evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
  183. evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
  184. evalscope/models/__init__.py +6 -29
  185. evalscope/models/mockllm.py +65 -0
  186. evalscope/models/model_apis.py +47 -0
  187. evalscope/models/modelscope.py +455 -0
  188. evalscope/models/openai_compatible.py +123 -0
  189. evalscope/models/text2image_model.py +124 -0
  190. evalscope/models/utils/openai.py +698 -0
  191. evalscope/perf/benchmark.py +2 -1
  192. evalscope/perf/http_client.py +4 -2
  193. evalscope/perf/plugin/api/custom_api.py +5 -4
  194. evalscope/perf/plugin/api/openai_api.py +11 -9
  195. evalscope/perf/plugin/datasets/custom.py +2 -1
  196. evalscope/perf/plugin/datasets/flickr8k.py +1 -1
  197. evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
  198. evalscope/perf/plugin/datasets/line_by_line.py +2 -1
  199. evalscope/perf/plugin/datasets/longalpaca.py +2 -1
  200. evalscope/perf/plugin/datasets/openqa.py +4 -2
  201. evalscope/perf/utils/benchmark_util.py +7 -5
  202. evalscope/perf/utils/db_util.py +9 -6
  203. evalscope/perf/utils/local_server.py +8 -3
  204. evalscope/perf/utils/rich_display.py +16 -10
  205. evalscope/report/__init__.py +2 -2
  206. evalscope/report/combinator.py +18 -12
  207. evalscope/report/generator.py +101 -6
  208. evalscope/report/{utils.py → report.py} +8 -6
  209. evalscope/run.py +26 -44
  210. evalscope/summarizer.py +1 -1
  211. evalscope/utils/__init__.py +21 -2
  212. evalscope/utils/chat_service.py +2 -1
  213. evalscope/utils/deprecation_utils.py +12 -1
  214. evalscope/utils/function_utils.py +29 -0
  215. evalscope/utils/io_utils.py +100 -5
  216. evalscope/utils/json_schema.py +208 -0
  217. evalscope/utils/logger.py +51 -12
  218. evalscope/utils/model_utils.py +10 -7
  219. evalscope/utils/multi_choices.py +271 -0
  220. evalscope/utils/url_utils.py +65 -0
  221. evalscope/version.py +2 -2
  222. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/METADATA +98 -49
  223. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/RECORD +234 -216
  224. tests/aigc/test_t2i.py +22 -4
  225. tests/benchmark/__init__.py +1 -0
  226. tests/benchmark/test_eval.py +386 -0
  227. tests/cli/test_all.py +3 -5
  228. tests/cli/test_collection.py +13 -4
  229. tests/cli/test_custom.py +22 -15
  230. tests/rag/test_clip_benchmark.py +1 -0
  231. evalscope/benchmarks/aigc/t2i/base.py +0 -56
  232. evalscope/benchmarks/arc/ai2_arc.py +0 -151
  233. evalscope/benchmarks/benchmark.py +0 -81
  234. evalscope/benchmarks/ceval/ceval_exam.py +0 -146
  235. evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
  236. evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
  237. evalscope/benchmarks/competition_math/competition_math.py +0 -79
  238. evalscope/benchmarks/data_adapter.py +0 -528
  239. evalscope/benchmarks/filters.py +0 -59
  240. evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
  241. evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
  242. evalscope/benchmarks/humaneval/humaneval.py +0 -79
  243. evalscope/benchmarks/mmlu/mmlu.py +0 -160
  244. evalscope/benchmarks/mmlu/samples.jsonl +0 -5
  245. evalscope/benchmarks/process_bench/critique_template.txt +0 -13
  246. evalscope/benchmarks/race/race.py +0 -104
  247. evalscope/benchmarks/race/samples.jsonl +0 -5
  248. evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
  249. evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
  250. evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
  251. evalscope/benchmarks/utils.py +0 -60
  252. evalscope/collections/evaluator.py +0 -375
  253. evalscope/metrics/completion_parsers.py +0 -227
  254. evalscope/metrics/named_metrics.py +0 -55
  255. evalscope/models/adapters/__init__.py +0 -14
  256. evalscope/models/adapters/base_adapter.py +0 -84
  257. evalscope/models/adapters/bfcl_adapter.py +0 -246
  258. evalscope/models/adapters/chat_adapter.py +0 -207
  259. evalscope/models/adapters/choice_adapter.py +0 -222
  260. evalscope/models/adapters/custom_adapter.py +0 -71
  261. evalscope/models/adapters/server_adapter.py +0 -236
  262. evalscope/models/adapters/t2i_adapter.py +0 -79
  263. evalscope/models/adapters/tau_bench_adapter.py +0 -189
  264. evalscope/models/custom/__init__.py +0 -4
  265. evalscope/models/custom/custom_model.py +0 -50
  266. evalscope/models/custom/dummy_model.py +0 -99
  267. evalscope/models/local_model.py +0 -128
  268. evalscope/models/register.py +0 -41
  269. tests/cli/test_run.py +0 -489
  270. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/LICENSE +0 -0
  271. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/WHEEL +0 -0
  272. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/entry_points.txt +0 -0
  273. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/top_level.txt +0 -0
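
The adapter diffs that follow all apply the same 1.0.0 migration: the old `@Benchmark.register(...)` decorator on a `DataAdapter` subclass is replaced by `@register_benchmark(BenchmarkMeta(...))` on a `DefaultDataAdapter` subclass from the new `evalscope.api` package, and per-sample hooks such as `record_to_sample` replace the old `gen_prompt`/`get_gold_answer`/`parse_pred_result` methods. A minimal sketch of that new registration pattern, using a hypothetical benchmark name and dataset id and only the constructor arguments visible in the diffs below (the released signatures may differ):

# Hypothetical adapter following the 1.0.0 pattern shown in the diffs below.
# 'my_benchmark' and 'my-org/my-dataset' are placeholders, not real registry entries.
from typing import Any, Dict

from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
from evalscope.api.dataset import Sample
from evalscope.api.registry import register_benchmark


@register_benchmark(
    BenchmarkMeta(
        name='my_benchmark',
        dataset_id='my-org/my-dataset',
        metric_list=['acc'],
        eval_split='test',
    )
)
class MyAdapter(DefaultDataAdapter):

    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
        # Map one raw dataset record onto the unified Sample schema.
        return Sample(input=record['question'], target=record['answer'])
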
evalscope/benchmarks/arena_hard/arena_hard_adapter.py
@@ -1,75 +1,97 @@
- from typing import Any, List
-
- from evalscope.benchmarks import Benchmark, DataAdapter
- from evalscope.metrics import LLMJudge, Metric, mean, metric_registry
+ # flake8: noqa: E501
+ from typing import Any, Dict, List
+
+ from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.evaluator import TaskState
+ from evalscope.api.metric import AggScore, SampleScore, Score
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
  from evalscope.utils.logger import get_logger

- # flake8: noqa
-
  logger = get_logger()

- GRADER_SYSTEM_PROMPT = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"." # noqa: E501
-
- GRADER_TEMPLATE = "<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>".strip(
- ) # noqa: E501
-
-
- @Benchmark.register(
-     name='arena_hard',
-     pretty_name='ArenaHard',
-     tags=['Instruction-Following', 'Arena'],
-     description=
-     'ArenaHard is a benchmark designed to evaluate the performance of large language models in a competitive setting, '
-     'where models are pitted against each other in a series of tasks to determine their relative strengths and weaknesses. '
-     'It includes a set of challenging tasks that require reasoning, understanding, and generation capabilities. '
-     'Currently not support `style-controlled winrate`; the official Judge model is `gpt-4-1106-preview`, while the baseline model is `gpt-4-0314`.', # noqa: E501
-     dataset_id='AI-ModelScope/arena-hard-auto-v0.1',
-     metric_list=['winrate'],
-     few_shot_num=0,
-     train_split=None,
-     eval_split='test')
- class ArenaHardAdapter(DataAdapter):
+ GRADER_SYSTEM_PROMPT = """Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\".""" # noqa: E501
+
+ GRADER_TEMPLATE = """<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>""".strip(
+ )
+
+
+ @register_benchmark(
+     BenchmarkMeta(
+         name='arena_hard',
+         pretty_name='ArenaHard',
+         tags=[Tags.INSTRUCTION_FOLLOWING, Tags.ARENA],
+         description=
+         'ArenaHard is a benchmark designed to evaluate the performance of large language models in a competitive setting, '
+         'where models are pitted against each other in a series of tasks to determine their relative strengths and weaknesses. '
+         'It includes a set of challenging tasks that require reasoning, understanding, and generation capabilities. '
+         'Currently not support `style-controlled winrate`; the official Judge model is `gpt-4-1106-preview`, while the baseline model is `gpt-4-0314`.',
+         dataset_id='AI-ModelScope/arena-hard-auto-v0.1',
+         metric_list=['winrate'],
+         few_shot_num=0,
+         train_split=None,
+         eval_split='test',
+         prompt_template='{question}'
+     )
+ )
+ class ArenaHardAdapter(DefaultDataAdapter):

      def __init__(self, *args, **kwargs):
          super().__init__(*args, **kwargs)

-         # register metrics
-         metric_registry.register(Metric(name='winrate', object=mean))
-
-         # whether to use LLM as a judge
-         self.llm_as_a_judge = True
+         self._use_llm_judge = True # Use LLM as a judge by default

-     def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs) -> dict:
-         question = input_d['question']
-         return self.gen_prompt_data(question)
-
-     def get_gold_answer(self, input_d: dict) -> str:
-         return input_d['prediction']
-
-     def parse_pred_result(self, result: str, raw_input_d: dict = None, **kwargs) -> str:
-         return result.strip()
-
-     def match(self, gold: str, pred: str):
-         # simple match
-         logger.warning(f'Please use LLMJudge to match the result for {self.name}')
-         return None
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         """
+         Convert a data record to a Sample object.

-     def llm_match(self, gold: Any, pred: Any, judge: LLMJudge, **kwargs) -> dict:
-         from .utils import post_process_arenahard
+         Args:
+             record (Dict[str, Any]): Input data record.

-         raw_input = kwargs.get('raw_input', None)
-         question = raw_input['question']
-         # gold is baseline answer 'A', pred is model answer 'B'
-         prompt1 = GRADER_TEMPLATE.format(question=question, answer_1=gold, answer_2=pred)
+         Returns:
+             Sample: Sample object with input, target, and metadata.
+         """
+         question = record['question']
+         baseline_prediction = record['prediction'] # baseline model prediction
+
+         return Sample(
+             input=question, target=baseline_prediction, metadata={'capability': record.get('capability', 'unknown')}
+         )
+
+     def llm_match_score(
+         self,
+         original_prediction: str,
+         filtered_prediction: str,
+         reference: str,
+         task_state: TaskState,
+     ) -> Score:
+         from .utils import get_judge_score, post_process_arenahard
+
+         score = Score(
+             extracted_prediction=filtered_prediction,
+             prediction=original_prediction,
+         )
+
+         question = task_state.input_text
+
+         # reference is baseline answer 'A', filtered_prediction is model answer 'B'
+         prompt1 = GRADER_TEMPLATE.format(question=question, answer_1=reference, answer_2=filtered_prediction)
          # reverse the order
-         prompt2 = GRADER_TEMPLATE.format(question=question, answer_1=pred, answer_2=gold)
+         prompt2 = GRADER_TEMPLATE.format(question=question, answer_1=filtered_prediction, answer_2=reference)
+
          # get grading response
-         game1_response = judge(prompt1, system_prompt=GRADER_SYSTEM_PROMPT)
-         game2_response = judge(prompt2, system_prompt=GRADER_SYSTEM_PROMPT)
+         game1_response = self.llm_judge.judge(prompt1, system_prompt=GRADER_SYSTEM_PROMPT)
+         game2_response = self.llm_judge.judge(prompt2, system_prompt=GRADER_SYSTEM_PROMPT)
+
          # parse grading response
          res1 = post_process_arenahard(game1_response)
          res2 = post_process_arenahard(game2_response)
-         return {
+
+         score1 = get_judge_score(res1, reverse=True)
+         score2 = get_judge_score(res2, reverse=False)
+
+         battle_result = {
              'model_a':
             'gpt4-0314',
              'model_b':
@@ -88,22 +110,26 @@ class ArenaHardAdapter(DataAdapter):
              ]
          }

-     def compute_metric(self, review_res_list: List[dict], **kwargs) -> List[dict]:
-         """
-         compute score of the model
-         """
+         # Set score based on the battle result
+         score.value = {'score': (score1 + score2) / 2}
+         score.explanation = f'LLM judge battles: Game1: {game1_response[:100]}... Game2: {game2_response[:100]}...'
+         score.metadata = {
+             'source': 'llm_judge',
+             'judge_strategy': self.judge_strategy,
+             'model': self.llm_judge.model_id,
+             'battle_result': battle_result
+         }
+         return score
+
+     def aggregate_scores(self, sample_scores: List[SampleScore]) -> List[AggScore]:
          import pandas as pd

          from .utils import compute_mle_elo, get_battles_from_row, get_bootstrap_result, get_win_rate_column

-         if isinstance(review_res_list[0], list):
-             review_res_list = [item for sublist in review_res_list for item in sublist]
-
-         battles = pd.concat([get_battles_from_row(res) for res in review_res_list])
+         battles = pd.concat([get_battles_from_row(res.score.metadata['battle_result']) for res in sample_scores])

          bootstrap_online_elo = compute_mle_elo(battles)

-         # bootstrap_elo_lu = get_bootstrap_result(battles, compute_mle_elo, 100)
          stats = pd.DataFrame()
          stats['results'] = None
          stats['results'] = stats['results'].astype('object')
@@ -112,11 +138,11 @@ class ArenaHardAdapter(DataAdapter):
              # assert model in bootstrap_elo_lu.columns
              stats.at[i, 'model'] = model
              stats.at[i, 'score'] = bootstrap_online_elo[model]
-             # stats.at[i, "lower"] = np.percentile(bootstrap_elo_lu[model], 2.5)
-             # stats.at[i, "upper"] = np.percentile(bootstrap_elo_lu[model], 97.5)
-
-         # stats['score'] = get_win_rate_column(stats, 'score', 'gpt4-0314').tolist()

          score = get_win_rate_column(stats, 'score', 'gpt4-0314').at['test_model']

-         return [{'metric_name': 'winrate', 'score': score, 'num': len(review_res_list)}]
+         return [AggScore(
+             score=score,
+             metric_name='winrate',
+             num=len(sample_scores),
+         )]
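
For readers unfamiliar with the pairwise setup, the standalone sketch below (illustrative only, not package code) shows how the two judged games combine into the 0-1 value stored in `score.value`: game 1 places the baseline as assistant A, so its verdict is scored with `reverse=True`, game 2 swaps the answer order, and the two game scores are averaged. The verdict mapping mirrors `get_judge_score` from `utils.py` in the next diff.

# Illustrative only: the verdict-to-score mapping mirrors get_judge_score below.
VERDICT_SCORES = {'A=B': 0.5, 'A>B': 0.75, 'A>>B': 1.0, 'B>A': 0.25, 'B>>A': 0.0}


def combined_win_score(game1_verdict: str, game2_verdict: str) -> float:
    """Average the two games into a single 0-1 score for the model under test."""
    # Game 1: the baseline answers as assistant A, so a win for B is a win for
    # the evaluated model; reversing the mapping is the same as 1 - score.
    score1 = 1.0 - VERDICT_SCORES.get(game1_verdict, 0.5)
    # Game 2: the answer order is swapped, so the mapping applies directly.
    score2 = VERDICT_SCORES.get(game2_verdict, 0.5)
    return (score1 + score2) / 2


assert combined_win_score('B>>A', 'A>>B') == 1.0  # evaluated model wins both games decisively
assert combined_win_score('A=B', 'A=B') == 0.5    # two ties give 0.5
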
evalscope/benchmarks/arena_hard/utils.py
@@ -19,6 +19,41 @@ def post_process_arenahard(completion):
          return None


+ def get_judge_score(result, reverse=False):
+     """
+     Calculate the judge score, considering confidence weight.
+
+     Args:
+         result: Judgment result ('A=B', 'A>B', 'A>>B', 'B>A', 'B>>A')
+         reverse: Whether to reverse the score
+
+     Returns:
+         float: Weighted score
+     """
+
+     # Base score mapping - using finer-grained scores
+     if not reverse:
+         score_mapping = {
+             'A=B': 0.5, # Tie
+             'A>B': 0.75, # A slightly wins
+             'A>>B': 1.0, # A significantly wins
+             'B>A': 0.25, # B slightly wins
+             'B>>A': 0.0, # B significantly wins
+         }
+     else:
+         score_mapping = {
+             'A=B': 0.5, # Tie
+             'A>B': 0.25, # A slightly wins
+             'A>>B': 0.0, # A significantly wins
+             'B>A': 0.75, # B slightly wins
+             'B>>A': 1.0, # B significantly wins
+         }
+
+     base_score = score_mapping.get(result, 0.5)
+
+     return base_score
+
+
  def get_battles_from_row(row, first_game_only=False, multiplier=3):
      results = []
      output = {'model_a': row['model_a'], 'model_b': row['model_b']}
@@ -106,7 +141,8 @@ def compute_mle_elo(df, SCALE=400, BASE=10, INIT_RATING=1000):
          return elo_scores.sort_values(ascending=False)

      lr = LogisticRegression(
-         fit_intercept=False, penalty=None, tol=1e-8) # May need to set a small value when not use GPT4 as judge model
+         fit_intercept=False, penalty=None, tol=1e-8
+     ) # May need to set a small value when not use GPT4 as judge model
      lr.fit(X, Y)

      elo_scores = SCALE * lr.coef_[0] + INIT_RATING
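
The winrate aggregation funnels each battle through `compute_mle_elo`, which fits Bradley-Terry style ratings with the `LogisticRegression` call shown in this hunk and rescales the coefficients as `SCALE * lr.coef_[0] + INIT_RATING`. A simplified, self-contained sketch of that idea follows; it assumes only decisive battles and a plus/minus log(BASE) design matrix, and it omits the tie handling, sample weights, and bootstrap confidence intervals that the real `utils.py` provides.

# Simplified sketch of MLE-Elo fitting; the design-matrix construction here is
# an assumption, only the estimator settings and the rescaling come from the diff.
import math

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression


def mle_elo_sketch(battles: pd.DataFrame, scale: int = 400, base: int = 10, init_rating: int = 1000) -> pd.Series:
    """Fit Bradley-Terry style ratings from pairwise battles (decisive outcomes only)."""
    models = pd.unique(battles[['model_a', 'model_b']].values.ravel())
    idx = {m: i for i, m in enumerate(models)}

    # One row per battle: +log(base) for model_a, -log(base) for model_b,
    # so the fitted coefficients live on a base-10 Elo-like scale.
    X = np.zeros((len(battles), len(models)))
    for row_i, row in enumerate(battles.itertuples(index=False)):
        X[row_i, idx[row.model_a]] = math.log(base)
        X[row_i, idx[row.model_b]] = -math.log(base)
    y = (battles['winner'] == 'model_a').astype(int).to_numpy()

    lr = LogisticRegression(fit_intercept=False, penalty=None, tol=1e-8)  # same settings as compute_mle_elo
    lr.fit(X, y)

    # Same rescaling as in the hunk above: SCALE * coef + INIT_RATING.
    return pd.Series(scale * lr.coef_[0] + init_rating, index=models).sort_values(ascending=False)


demo = pd.DataFrame({
    'model_a': ['gpt4-0314'] * 4,
    'model_b': ['test_model'] * 4,
    'winner': ['model_b', 'model_b', 'model_a', 'model_b'],
})
print(mle_elo_sketch(demo))  # test_model should land above the gpt4-0314 baseline
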
evalscope/benchmarks/bbh/bbh_adapter.py
@@ -1,17 +1,16 @@
  # Copyright (c) Alibaba, Inc. and its affiliates.

- import json
  import os
- import random
  import re
+ from typing import Any, Dict

- from evalscope.benchmarks import Benchmark, DataAdapter
- from evalscope.constants import AnswerKeys
- from evalscope.metrics import exact_match
+ from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+ from evalscope.api.dataset import Sample
+ from evalscope.api.evaluator import TaskState
+ from evalscope.api.registry import register_benchmark
+ from evalscope.constants import Tags
  from evalscope.utils.logger import get_logger

- # flake8: noqa
-
  logger = get_logger()

  # BBH multiple choice subset list
@@ -55,160 +54,89 @@ FREE_FORM_LIST = [
  TASK_TYPE = 'task_type'
  SUBSET_LIST = MULTIPLE_CHOICE_LIST + FREE_FORM_LIST

-
- @Benchmark.register(
-     name='bbh',
-     pretty_name='BBH',
-     tags=['Reasoning'],
-     description=
-     'The BBH (Big Bench Hard) benchmark is a collection of challenging tasks designed to evaluate the reasoning capabilities of AI models. It includes both free-form and multiple-choice tasks, covering a wide range of reasoning skills.', # noqa: E501
-     dataset_id='modelscope/bbh',
-     subset_list=SUBSET_LIST,
-     metric_list=['AverageAccuracy'],
-     few_shot_num=3,
-     train_split=None,
-     eval_split='test',
-     prompt_template="Q: {query}\nA: Let's think step by step.",
+ PROMPT_TEMPLATE = """
+ Q: {question}
+ A: Let's think step by step. Put your final answer in the format of "So the answer is $ANSWER" (without quotes and markdown) where $ANSWER is the answer to the problem.
+ """.lstrip() # noqa: E501
+
+ FEWSHOT_TEMPLATE = """
+ {fewshot}
+
+ """.lstrip() + PROMPT_TEMPLATE
+
+
+ @register_benchmark(
+     BenchmarkMeta(
+         name='bbh',
+         pretty_name='BBH',
+         dataset_id='evalscope/bbh',
+         tags=[Tags.REASONING],
+         description=
+         'The BBH (Big Bench Hard) benchmark is a collection of challenging tasks designed to evaluate the reasoning capabilities of AI models. It includes both free-form and multiple-choice tasks, covering a wide range of reasoning skills.', # noqa: E501
+         subset_list=SUBSET_LIST,
+         few_shot_num=3,
+         train_split=None,
+         eval_split='test',
+         metric_list=['acc'],
+         prompt_template=PROMPT_TEMPLATE,
+         few_shot_prompt_template=FEWSHOT_TEMPLATE,
+     )
  )
- class BBHAdapter(DataAdapter):
+ class BBHAdapter(DefaultDataAdapter):
      """
      Adapter for BBH free-form and multiple-choices sub-tasks.
      """

      def __init__(self, **kwargs):
-
          few_shot_num = kwargs.get('few_shot_num', 3)

          if few_shot_num != 3 and few_shot_num != 0:
-             logger.error(f'BBH uses 3-shot examples with CoT or 0-shot by system, but got {few_shot_num}. '
-                          f'Use 3-shot by default.')
+             logger.error(
+                 f'BBH uses 3-shot examples with CoT or 0-shot by system, but got {few_shot_num}. '
+                 f'Use 3-shot by default.'
+             )
              kwargs['few_shot_num'] = 3

          super().__init__(**kwargs)

-     def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
-         data_dict = {}
-         for subset_name in subset_list:
-             for split_name in [self.eval_split]:
-                 if os.path.exists(dataset_name_or_path):
-                     file_path = os.path.join(dataset_name_or_path, f'{subset_name}.json')
-                 else:
-                     file_path: str = os.path.join(work_dir, dataset_name_or_path, f'{subset_name}.json')
-                 if os.path.exists(file_path):
-                     with open(file_path, 'r', encoding='utf-8') as f:
-                         examples = json.load(f)['examples']
-                         if subset_name in data_dict:
-                             data_dict[subset_name].update({split_name: examples})
-                         else:
-                             data_dict[subset_name] = {split_name: examples}
-
-         return data_dict
-
-     def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
-         """
-         Generate model prompt from raw data, unify the prompt format for bbh(multiple choice) benchmark.
-
-         Args:
-             input_d (dict): The raw input. A single data format of the BBH:
-
-             {
-                 'input': '((-1 + 2 + 9 * 5) - (-2 + -4 + -4 * -7)) =',
-                 'target': '24',
-             }
-
-         Returns:
-             {'data': ['xxx']}
-         """
-         # few_shot_list: should be ['xxxx']
-         if len(few_shot_list) > 0:
-             cot_prompts = 'Follow the given examples and answer the question.\n' + few_shot_list[0]
-         else:
-             cot_prompts = ''
-         full_prompt = cot_prompts + self.prompt_template.format(query=input_d['input'])
-
-         return self.gen_prompt_data(full_prompt)
-
-     def gen_prompts(self, data_dict: dict) -> dict:
-         """
-         Generate dataset prompts from raw input, unify the prompt format for different datasets.
-
-         Args:
-             data_dict: Refer to the output of load method: evalscope.benchmarks.benchmark.Benchmark.load
-
-         Returns:
-             {'subset_name': [prompt_d_1, prompt_d_2, ...]}
-             prompt_d_i (dict): refer to the output of gen_prompt method.
-
-             e.g. train -- few-shot data, test -- target dataset to evaluate.
-         """
-         res_dict: dict = {}
-
-         if self.few_shot_num < 0:
-             raise ValueError(f'Invalid shot_num: {self.few_shot_num} for few-shot evaluation.')
-
-         logger.info(f'Use default settings: '
-                     f'> few_shot_num: {self.few_shot_num}, '
-                     f'> few_shot_split: {self.train_split}, '
-                     f'> target_eval_split: {self.eval_split}')
-
-         for sub_name, sub_data_dict in data_dict.items():
-             few_shot_data = []
-             if self.few_shot_num > 0:
-                 with open(
-                         os.path.join(os.path.dirname(__file__), 'cot_prompts', f'{sub_name}.txt'), 'r',
-                         encoding='utf-8') as f:
-                     cot_prompt_str = f.read()
-                 few_shot_data = [cot_prompt_str]
-
-             res_dict[sub_name] = []
-             for sample_d in sub_data_dict[self.eval_split]:
-                 prompt_d = self.gen_prompt(input_d=sample_d, few_shot_list=few_shot_data)
-                 sample_d_new = sample_d.copy()
-                 if sub_name in MULTIPLE_CHOICE_LIST:
-                     sample_d_new[TASK_TYPE] = MULTIPLE_CHOICE
-                 elif sub_name in FREE_FORM_LIST:
-                     sample_d_new[TASK_TYPE] = FREE_FORM
-                 else:
-                     raise ValueError(f'Invalid subset name: {sub_name}')
-
-                 prompt_d[AnswerKeys.RAW_INPUT] = sample_d_new
-                 res_dict[sub_name].append(prompt_d)
-
-         return res_dict
-
-     def get_gold_answer(self, input_d: dict) -> str:
-         # Get the gold choice
-         gold = input_d.get('target', '')
-         # remove brackets
-         if gold is None:
-             logger.error(f'BBHAdapter: gold is None.')
-         gold = gold.replace('(', '').replace(')', '')
-         return gold
-
-     def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
-         """
-         Parse the model output to get the answer. Could be the best choice index.
-
-         Args:
-             result: Predicted answer from the model. Usually a string for chat.
-             raw_input_d (dict): The raw input. Depending on the dataset.
-             eval_type: 'checkpoint' or 'service' or `custom`, default: 'checkpoint'
-
-         Returns:
-             The parsed answer. Depending on the dataset. Usually a string for chat.
-         """
-         # Note: to use same extraction method for both of checkpoint/service/custom.
-         task_type: str = raw_input_d.get(TASK_TYPE)
+     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+         input = record['input']
+         target = record['target'].replace('(', '').replace(')', '').strip() # Clean up the target answer
+
+         # Determine task type based on subset name
+         task_type = None
+         subset_name = self.current_subset_name
+         if subset_name in MULTIPLE_CHOICE_LIST:
+             task_type = MULTIPLE_CHOICE
+         elif subset_name in FREE_FORM_LIST:
+             task_type = FREE_FORM
+
+         metadata = {TASK_TYPE: task_type}
+
+         return Sample(input=input, target=target, metadata=metadata, subset_key=subset_name)
+
+     def format_fewshot_template(self, fewshot: str, sample: Sample) -> str:
+         # Load CoT prompts from file for BBH
+         subset_name = sample.subset_key
+         if subset_name:
+             cot_file_path = os.path.join(os.path.dirname(__file__), 'cot_prompts', f'{subset_name}.txt')
+             if os.path.exists(cot_file_path):
+                 with open(cot_file_path, 'r', encoding='utf-8') as f:
+                     fewshot = f.read().strip()
+         return self.few_shot_prompt_template.format(
+             fewshot=fewshot,
+             question=sample.input,
+         )
+
+     def extract_answer(self, prediction: str, task_state: TaskState):
+         task_type = task_state.metadata.get(TASK_TYPE)

          if task_type == MULTIPLE_CHOICE:
-             return self._extract_mc_answer(result)
+             return self._extract_mc_answer(prediction)
          elif task_type == FREE_FORM:
-             return self._extract_ff_answer(result)
+             return self._extract_ff_answer(prediction)
          else:
-             raise ValueError(f'Invalid task type: {task_type}')
-
-     def match(self, gold: str, pred: str) -> float:
-         return exact_match(gold=gold, pred=pred)
+             return prediction.strip()

      @classmethod
      def _extract_mc_answer(cls, ans: str) -> str: