evalscope 0.17.1__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/__init__.py +4 -1
- evalscope/api/__init__.py +0 -0
- evalscope/api/benchmark/__init__.py +3 -0
- evalscope/api/benchmark/adapters/__init__.py +3 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +683 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +155 -0
- evalscope/api/benchmark/benchmark.py +321 -0
- evalscope/api/benchmark/meta.py +115 -0
- evalscope/api/dataset/__init__.py +2 -0
- evalscope/api/dataset/dataset.py +349 -0
- evalscope/api/dataset/loader.py +261 -0
- evalscope/api/dataset/utils.py +143 -0
- evalscope/api/evaluator/__init__.py +3 -0
- evalscope/api/evaluator/cache.py +355 -0
- evalscope/api/evaluator/evaluator.py +56 -0
- evalscope/api/evaluator/state.py +264 -0
- evalscope/api/filter/__init__.py +1 -0
- evalscope/api/filter/filter.py +72 -0
- evalscope/api/messages/__init__.py +11 -0
- evalscope/api/messages/chat_message.py +198 -0
- evalscope/api/messages/content.py +102 -0
- evalscope/api/messages/utils.py +35 -0
- evalscope/api/metric/__init__.py +2 -0
- evalscope/api/metric/metric.py +55 -0
- evalscope/api/metric/scorer.py +105 -0
- evalscope/api/mixin/__init__.py +2 -0
- evalscope/api/mixin/dataset_mixin.py +105 -0
- evalscope/api/mixin/llm_judge_mixin.py +168 -0
- evalscope/api/model/__init__.py +12 -0
- evalscope/api/model/generate_config.py +157 -0
- evalscope/api/model/model.py +383 -0
- evalscope/api/model/model_output.py +285 -0
- evalscope/api/registry.py +182 -0
- evalscope/api/tool/__init__.py +3 -0
- evalscope/api/tool/tool_call.py +101 -0
- evalscope/api/tool/tool_info.py +173 -0
- evalscope/api/tool/utils.py +64 -0
- evalscope/app/ui/app_ui.py +2 -1
- evalscope/app/ui/multi_model.py +50 -25
- evalscope/app/ui/single_model.py +23 -11
- evalscope/app/utils/data_utils.py +42 -26
- evalscope/app/utils/text_utils.py +0 -2
- evalscope/app/utils/visualization.py +9 -4
- evalscope/arguments.py +6 -7
- evalscope/backend/opencompass/api_meta_template.py +2 -1
- evalscope/backend/opencompass/backend_manager.py +6 -3
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
- evalscope/backend/rag_eval/utils/embedding.py +2 -1
- evalscope/backend/rag_eval/utils/llm.py +13 -12
- evalscope/benchmarks/__init__.py +0 -2
- evalscope/benchmarks/aigc/i2i/__init__.py +0 -0
- evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +44 -0
- evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +53 -55
- evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +41 -46
- evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +29 -45
- evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +34 -44
- evalscope/benchmarks/aigc/t2i/tifa_adapter.py +16 -27
- evalscope/benchmarks/aime/aime24_adapter.py +38 -40
- evalscope/benchmarks/aime/aime25_adapter.py +34 -40
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +86 -60
- evalscope/benchmarks/arc/arc_adapter.py +34 -147
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70
- evalscope/benchmarks/arena_hard/utils.py +37 -1
- evalscope/benchmarks/bbh/bbh_adapter.py +72 -144
- evalscope/benchmarks/bfcl/bfcl_adapter.py +181 -160
- evalscope/benchmarks/bfcl/generation.py +222 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +94 -162
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -125
- evalscope/benchmarks/competition_math/competition_math_adapter.py +56 -108
- evalscope/benchmarks/data_collection/data_collection_adapter.py +183 -45
- evalscope/benchmarks/docmath/docmath_adapter.py +109 -51
- evalscope/benchmarks/docmath/utils.py +4 -5
- evalscope/benchmarks/drop/drop_adapter.py +88 -40
- evalscope/benchmarks/frames/frames_adapter.py +135 -52
- evalscope/benchmarks/general_arena/general_arena_adapter.py +136 -98
- evalscope/benchmarks/general_arena/utils.py +23 -27
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +40 -101
- evalscope/benchmarks/general_qa/general_qa_adapter.py +73 -134
- evalscope/benchmarks/gpqa/gpqa_adapter.py +61 -100
- evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py} +12 -5
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +62 -142
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +35 -124
- evalscope/benchmarks/hle/hle_adapter.py +127 -93
- evalscope/benchmarks/humaneval/humaneval_adapter.py +86 -55
- evalscope/benchmarks/ifeval/ifeval_adapter.py +69 -40
- evalscope/benchmarks/ifeval/instructions.py +109 -64
- evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -65
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +121 -71
- evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
- evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
- evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +49 -75
- evalscope/benchmarks/math_500/math_500_adapter.py +41 -48
- evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -205
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +80 -99
- evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +64 -110
- evalscope/benchmarks/musr/musr_adapter.py +33 -64
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +192 -152
- evalscope/benchmarks/process_bench/process_bench_adapter.py +144 -76
- evalscope/benchmarks/race/race_adapter.py +33 -119
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
- evalscope/benchmarks/super_gpqa/{five_shot_prompt.txt → prompt.py} +14 -16
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +73 -117
- evalscope/benchmarks/super_gpqa/utils.py +2 -1
- evalscope/benchmarks/tau_bench/generation.py +147 -0
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +112 -54
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +91 -70
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -124
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -265
- evalscope/benchmarks/winogrande/winogrande_adapter.py +28 -54
- evalscope/cli/cli.py +2 -0
- evalscope/cli/start_server.py +6 -3
- evalscope/collections/__init__.py +2 -10
- evalscope/collections/sampler.py +10 -10
- evalscope/collections/schema.py +13 -11
- evalscope/config.py +95 -54
- evalscope/constants.py +29 -61
- evalscope/evaluator/__init__.py +1 -1
- evalscope/evaluator/evaluator.py +277 -423
- evalscope/filters/__init__.py +2 -0
- evalscope/filters/extraction.py +126 -0
- evalscope/filters/selection.py +57 -0
- evalscope/metrics/__init__.py +13 -13
- evalscope/metrics/llm_judge.py +32 -30
- evalscope/metrics/math_parser.py +27 -22
- evalscope/metrics/metric.py +307 -0
- evalscope/metrics/metrics.py +22 -18
- evalscope/metrics/t2v_metrics/__init__.py +0 -52
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
- evalscope/models/__init__.py +6 -29
- evalscope/models/mockllm.py +65 -0
- evalscope/models/model_apis.py +47 -0
- evalscope/models/modelscope.py +455 -0
- evalscope/models/openai_compatible.py +123 -0
- evalscope/models/text2image_model.py +124 -0
- evalscope/models/utils/openai.py +698 -0
- evalscope/perf/benchmark.py +2 -1
- evalscope/perf/http_client.py +4 -2
- evalscope/perf/plugin/api/custom_api.py +5 -4
- evalscope/perf/plugin/api/openai_api.py +11 -9
- evalscope/perf/plugin/datasets/custom.py +2 -1
- evalscope/perf/plugin/datasets/flickr8k.py +1 -1
- evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
- evalscope/perf/plugin/datasets/line_by_line.py +2 -1
- evalscope/perf/plugin/datasets/longalpaca.py +2 -1
- evalscope/perf/plugin/datasets/openqa.py +4 -2
- evalscope/perf/utils/benchmark_util.py +7 -5
- evalscope/perf/utils/db_util.py +9 -6
- evalscope/perf/utils/local_server.py +8 -3
- evalscope/perf/utils/rich_display.py +16 -10
- evalscope/report/__init__.py +2 -2
- evalscope/report/combinator.py +18 -12
- evalscope/report/generator.py +101 -6
- evalscope/report/{utils.py → report.py} +8 -6
- evalscope/run.py +26 -44
- evalscope/summarizer.py +1 -1
- evalscope/utils/__init__.py +21 -2
- evalscope/utils/chat_service.py +2 -1
- evalscope/utils/deprecation_utils.py +12 -1
- evalscope/utils/function_utils.py +29 -0
- evalscope/utils/io_utils.py +100 -5
- evalscope/utils/json_schema.py +208 -0
- evalscope/utils/logger.py +51 -12
- evalscope/utils/model_utils.py +10 -7
- evalscope/utils/multi_choices.py +271 -0
- evalscope/utils/url_utils.py +65 -0
- evalscope/version.py +2 -2
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/METADATA +98 -49
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/RECORD +234 -216
- tests/aigc/test_t2i.py +22 -4
- tests/benchmark/__init__.py +1 -0
- tests/benchmark/test_eval.py +386 -0
- tests/cli/test_all.py +3 -5
- tests/cli/test_collection.py +13 -4
- tests/cli/test_custom.py +22 -15
- tests/rag/test_clip_benchmark.py +1 -0
- evalscope/benchmarks/aigc/t2i/base.py +0 -56
- evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope/benchmarks/benchmark.py +0 -81
- evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope/benchmarks/data_adapter.py +0 -528
- evalscope/benchmarks/filters.py +0 -59
- evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope/benchmarks/process_bench/critique_template.txt +0 -13
- evalscope/benchmarks/race/race.py +0 -104
- evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
- evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope/benchmarks/utils.py +0 -60
- evalscope/collections/evaluator.py +0 -375
- evalscope/metrics/completion_parsers.py +0 -227
- evalscope/metrics/named_metrics.py +0 -55
- evalscope/models/adapters/__init__.py +0 -14
- evalscope/models/adapters/base_adapter.py +0 -84
- evalscope/models/adapters/bfcl_adapter.py +0 -246
- evalscope/models/adapters/chat_adapter.py +0 -207
- evalscope/models/adapters/choice_adapter.py +0 -222
- evalscope/models/adapters/custom_adapter.py +0 -71
- evalscope/models/adapters/server_adapter.py +0 -236
- evalscope/models/adapters/t2i_adapter.py +0 -79
- evalscope/models/adapters/tau_bench_adapter.py +0 -189
- evalscope/models/custom/__init__.py +0 -4
- evalscope/models/custom/custom_model.py +0 -50
- evalscope/models/custom/dummy_model.py +0 -99
- evalscope/models/local_model.py +0 -128
- evalscope/models/register.py +0 -41
- tests/cli/test_run.py +0 -489
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/LICENSE +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/WHEEL +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/entry_points.txt +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/top_level.txt +0 -0
--- a/evalscope/benchmarks/arena_hard/arena_hard_adapter.py
+++ b/evalscope/benchmarks/arena_hard/arena_hard_adapter.py
@@ -1,75 +1,97 @@
-
-
-
-from evalscope.
+# flake8: noqa: E501
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.metric import AggScore, SampleScore, Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger

-# flake8: noqa
-
 logger = get_logger()

-GRADER_SYSTEM_PROMPT = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"." # noqa: E501
-
-GRADER_TEMPLATE = "<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>".strip(
-)
-
-
-@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+GRADER_SYSTEM_PROMPT = """Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\".""" # noqa: E501
+
+GRADER_TEMPLATE = """<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>""".strip(
+)
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='arena_hard',
+        pretty_name='ArenaHard',
+        tags=[Tags.INSTRUCTION_FOLLOWING, Tags.ARENA],
+        description=
+        'ArenaHard is a benchmark designed to evaluate the performance of large language models in a competitive setting, '
+        'where models are pitted against each other in a series of tasks to determine their relative strengths and weaknesses. '
+        'It includes a set of challenging tasks that require reasoning, understanding, and generation capabilities. '
+        'Currently not support `style-controlled winrate`; the official Judge model is `gpt-4-1106-preview`, while the baseline model is `gpt-4-0314`.',
+        dataset_id='AI-ModelScope/arena-hard-auto-v0.1',
+        metric_list=['winrate'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='test',
+        prompt_template='{question}'
+    )
+)
+class ArenaHardAdapter(DefaultDataAdapter):

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)

-        #
-        metric_registry.register(Metric(name='winrate', object=mean))
-
-        # whether to use LLM as a judge
-        self.llm_as_a_judge = True
+        self._use_llm_judge = True # Use LLM as a judge by default

-    def
-
-
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        return input_d['prediction']
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, **kwargs) -> str:
-        return result.strip()
-
-    def match(self, gold: str, pred: str):
-        # simple match
-        logger.warning(f'Please use LLMJudge to match the result for {self.name}')
-        return None
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        """
+        Convert a data record to a Sample object.

-
-
+        Args:
+            record (Dict[str, Any]): Input data record.

-
-
-
-
+        Returns:
+            Sample: Sample object with input, target, and metadata.
+        """
+        question = record['question']
+        baseline_prediction = record['prediction'] # baseline model prediction
+
+        return Sample(
+            input=question, target=baseline_prediction, metadata={'capability': record.get('capability', 'unknown')}
+        )
+
+    def llm_match_score(
+        self,
+        original_prediction: str,
+        filtered_prediction: str,
+        reference: str,
+        task_state: TaskState,
+    ) -> Score:
+        from .utils import get_judge_score, post_process_arenahard
+
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+
+        question = task_state.input_text
+
+        # reference is baseline answer 'A', filtered_prediction is model answer 'B'
+        prompt1 = GRADER_TEMPLATE.format(question=question, answer_1=reference, answer_2=filtered_prediction)
         # reverse the order
-        prompt2 = GRADER_TEMPLATE.format(question=question, answer_1=
+        prompt2 = GRADER_TEMPLATE.format(question=question, answer_1=filtered_prediction, answer_2=reference)
+
         # get grading response
-        game1_response = judge(prompt1, system_prompt=GRADER_SYSTEM_PROMPT)
-        game2_response = judge(prompt2, system_prompt=GRADER_SYSTEM_PROMPT)
+        game1_response = self.llm_judge.judge(prompt1, system_prompt=GRADER_SYSTEM_PROMPT)
+        game2_response = self.llm_judge.judge(prompt2, system_prompt=GRADER_SYSTEM_PROMPT)
+
         # parse grading response
         res1 = post_process_arenahard(game1_response)
         res2 = post_process_arenahard(game2_response)
-
+
+        score1 = get_judge_score(res1, reverse=True)
+        score2 = get_judge_score(res2, reverse=False)
+
+        battle_result = {
             'model_a':
             'gpt4-0314',
             'model_b':
@@ -88,22 +110,26 @@ class ArenaHardAdapter(DataAdapter):
             ]
         }

-
-
-
-
+        # Set score based on the battle result
+        score.value = {'score': (score1 + score2) / 2}
+        score.explanation = f'LLM judge battles: Game1: {game1_response[:100]}... Game2: {game2_response[:100]}...'
+        score.metadata = {
+            'source': 'llm_judge',
+            'judge_strategy': self.judge_strategy,
+            'model': self.llm_judge.model_id,
+            'battle_result': battle_result
+        }
+        return score
+
+    def aggregate_scores(self, sample_scores: List[SampleScore]) -> List[AggScore]:
         import pandas as pd

         from .utils import compute_mle_elo, get_battles_from_row, get_bootstrap_result, get_win_rate_column

-
-        review_res_list = [item for sublist in review_res_list for item in sublist]
-
-        battles = pd.concat([get_battles_from_row(res) for res in review_res_list])
+        battles = pd.concat([get_battles_from_row(res.score.metadata['battle_result']) for res in sample_scores])

         bootstrap_online_elo = compute_mle_elo(battles)

-        # bootstrap_elo_lu = get_bootstrap_result(battles, compute_mle_elo, 100)
         stats = pd.DataFrame()
         stats['results'] = None
         stats['results'] = stats['results'].astype('object')
@@ -112,11 +138,11 @@ class ArenaHardAdapter(DataAdapter):
             # assert model in bootstrap_elo_lu.columns
             stats.at[i, 'model'] = model
             stats.at[i, 'score'] = bootstrap_online_elo[model]
-            # stats.at[i, "lower"] = np.percentile(bootstrap_elo_lu[model], 2.5)
-            # stats.at[i, "upper"] = np.percentile(bootstrap_elo_lu[model], 97.5)
-
-        # stats['score'] = get_win_rate_column(stats, 'score', 'gpt4-0314').tolist()

         score = get_win_rate_column(stats, 'score', 'gpt4-0314').at['test_model']

-        return [
+        return [AggScore(
+            score=score,
+            metric_name='winrate',
+            num=len(sample_scores),
+        )]
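The hunks above replace the 0.17.x `DataAdapter`/`metric_registry` wiring with the new `register_benchmark` + `BenchmarkMeta` + `DefaultDataAdapter` API. Below is a minimal sketch of that registration pattern, distilled from this diff only; the benchmark name, dataset id, and record keys are hypothetical placeholders, not shipped code.

```python
# Minimal sketch of the 1.0.0 adapter pattern visible in this diff (not part of the package).
# 'my_qa', its dataset_id, and the record keys are hypothetical placeholders.
from typing import Any, Dict

from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
from evalscope.api.dataset import Sample
from evalscope.api.registry import register_benchmark
from evalscope.constants import Tags


@register_benchmark(
    BenchmarkMeta(
        name='my_qa',                       # hypothetical benchmark name
        pretty_name='MyQA',
        tags=[Tags.REASONING],
        description='Toy benchmark illustrating the new adapter API.',
        dataset_id='my-org/my-qa-dataset',  # hypothetical dataset id
        metric_list=['acc'],
        few_shot_num=0,
        train_split=None,
        eval_split='test',
        prompt_template='{question}',
    )
)
class MyQAAdapter(DefaultDataAdapter):

    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
        # Map one raw dataset record into the Sample(input, target, metadata)
        # structure consumed by the new evaluator pipeline.
        return Sample(
            input=record['question'],
            target=record['answer'],
            metadata={'subset': record.get('subset', 'default')},
        )
```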
--- a/evalscope/benchmarks/arena_hard/utils.py
+++ b/evalscope/benchmarks/arena_hard/utils.py
@@ -19,6 +19,41 @@ def post_process_arenahard(completion):
     return None


+def get_judge_score(result, reverse=False):
+    """
+    Calculate the judge score, considering confidence weight.
+
+    Args:
+        result: Judgment result ('A=B', 'A>B', 'A>>B', 'B>A', 'B>>A')
+        reverse: Whether to reverse the score
+
+    Returns:
+        float: Weighted score
+    """
+
+    # Base score mapping - using finer-grained scores
+    if not reverse:
+        score_mapping = {
+            'A=B': 0.5,  # Tie
+            'A>B': 0.75,  # A slightly wins
+            'A>>B': 1.0,  # A significantly wins
+            'B>A': 0.25,  # B slightly wins
+            'B>>A': 0.0,  # B significantly wins
+        }
+    else:
+        score_mapping = {
+            'A=B': 0.5,  # Tie
+            'A>B': 0.25,  # A slightly wins
+            'A>>B': 0.0,  # A significantly wins
+            'B>A': 0.75,  # B slightly wins
+            'B>>A': 1.0,  # B significantly wins
+        }
+
+    base_score = score_mapping.get(result, 0.5)
+
+    return base_score
+
+
 def get_battles_from_row(row, first_game_only=False, multiplier=3):
     results = []
     output = {'model_a': row['model_a'], 'model_b': row['model_b']}
@@ -106,7 +141,8 @@ def compute_mle_elo(df, SCALE=400, BASE=10, INIT_RATING=1000):
         return elo_scores.sort_values(ascending=False)

     lr = LogisticRegression(
-        fit_intercept=False, penalty=None, tol=1e-8
+        fit_intercept=False, penalty=None, tol=1e-8
+    ) # May need to set a small value when not use GPT4 as judge model
     lr.fit(X, Y)

     elo_scores = SCALE * lr.coef_[0] + INIT_RATING
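To make the new `winrate` bookkeeping concrete, the snippet below re-derives one battle score using the same mapping as the `get_judge_score` helper added above. It is a standalone illustration: the helper is re-implemented inline and the verdict strings are hypothetical judge outputs, not data from the package.

```python
# Standalone sketch of the two-game scoring added in arena_hard/utils.py.
# The mapping mirrors get_judge_score from the diff; the sample verdicts are made up.

def judge_score(result: str, reverse: bool = False) -> float:
    forward = {'A=B': 0.5, 'A>B': 0.75, 'A>>B': 1.0, 'B>A': 0.25, 'B>>A': 0.0}
    backward = {'A=B': 0.5, 'A>B': 0.25, 'A>>B': 0.0, 'B>A': 0.75, 'B>>A': 1.0}
    return (backward if reverse else forward).get(result, 0.5)

# Game 1 places the gpt4-0314 baseline answer in slot A, so reverse=True scores it from
# the evaluated model's perspective; game 2 swaps the answers and uses reverse=False.
game1, game2 = 'A>B', 'B>A'  # hypothetical verdicts: baseline slightly better in both orderings
battle_score = (judge_score(game1, reverse=True) + judge_score(game2, reverse=False)) / 2
print(battle_score)  # 0.25 -> the judge favored the baseline in both games
```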
--- a/evalscope/benchmarks/bbh/bbh_adapter.py
+++ b/evalscope/benchmarks/bbh/bbh_adapter.py
@@ -1,17 +1,16 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.

-import json
 import os
-import random
 import re
+from typing import Any, Dict

-from evalscope.
-from evalscope.
-from evalscope.
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger

-# flake8: noqa
-
 logger = get_logger()

 # BBH multiple choice subset list
@@ -55,160 +54,89 @@ FREE_FORM_LIST = [
 TASK_TYPE = 'task_type'
 SUBSET_LIST = MULTIPLE_CHOICE_LIST + FREE_FORM_LIST

-
-
-
-
-
-
-
-
-
-
-
-
-
-
+PROMPT_TEMPLATE = """
+Q: {question}
+A: Let's think step by step. Put your final answer in the format of "So the answer is $ANSWER" (without quotes and markdown) where $ANSWER is the answer to the problem.
+""".lstrip() # noqa: E501
+
+FEWSHOT_TEMPLATE = """
+{fewshot}
+
+""".lstrip() + PROMPT_TEMPLATE
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='bbh',
+        pretty_name='BBH',
+        dataset_id='evalscope/bbh',
+        tags=[Tags.REASONING],
+        description=
+        'The BBH (Big Bench Hard) benchmark is a collection of challenging tasks designed to evaluate the reasoning capabilities of AI models. It includes both free-form and multiple-choice tasks, covering a wide range of reasoning skills.', # noqa: E501
+        subset_list=SUBSET_LIST,
+        few_shot_num=3,
+        train_split=None,
+        eval_split='test',
+        metric_list=['acc'],
+        prompt_template=PROMPT_TEMPLATE,
+        few_shot_prompt_template=FEWSHOT_TEMPLATE,
+    )
 )
-class BBHAdapter(
+class BBHAdapter(DefaultDataAdapter):
     """
     Adapter for BBH free-form and multiple-choices sub-tasks.
     """

     def __init__(self, **kwargs):
-
         few_shot_num = kwargs.get('few_shot_num', 3)

         if few_shot_num != 3 and few_shot_num != 0:
-            logger.error(
-
+            logger.error(
+                f'BBH uses 3-shot examples with CoT or 0-shot by system, but got {few_shot_num}. '
+                f'Use 3-shot by default.'
+            )
             kwargs['few_shot_num'] = 3

         super().__init__(**kwargs)

-    def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            {'data': ['xxx']}
-        """
-        # few_shot_list: should be ['xxxx']
-        if len(few_shot_list) > 0:
-            cot_prompts = 'Follow the given examples and answer the question.\n' + few_shot_list[0]
-        else:
-            cot_prompts = ''
-        full_prompt = cot_prompts + self.prompt_template.format(query=input_d['input'])
-
-        return self.gen_prompt_data(full_prompt)
-
-    def gen_prompts(self, data_dict: dict) -> dict:
-        """
-        Generate dataset prompts from raw input, unify the prompt format for different datasets.
-
-        Args:
-            data_dict: Refer to the output of load method: evalscope.benchmarks.benchmark.Benchmark.load
-
-        Returns:
-            {'subset_name': [prompt_d_1, prompt_d_2, ...]}
-            prompt_d_i (dict): refer to the output of gen_prompt method.
-
-            e.g. train -- few-shot data, test -- target dataset to evaluate.
-        """
-        res_dict: dict = {}
-
-        if self.few_shot_num < 0:
-            raise ValueError(f'Invalid shot_num: {self.few_shot_num} for few-shot evaluation.')
-
-        logger.info(f'Use default settings: '
-                    f'> few_shot_num: {self.few_shot_num}, '
-                    f'> few_shot_split: {self.train_split}, '
-                    f'> target_eval_split: {self.eval_split}')
-
-        for sub_name, sub_data_dict in data_dict.items():
-            few_shot_data = []
-            if self.few_shot_num > 0:
-                with open(
-                        os.path.join(os.path.dirname(__file__), 'cot_prompts', f'{sub_name}.txt'), 'r',
-                        encoding='utf-8') as f:
-                    cot_prompt_str = f.read()
-                few_shot_data = [cot_prompt_str]
-
-            res_dict[sub_name] = []
-            for sample_d in sub_data_dict[self.eval_split]:
-                prompt_d = self.gen_prompt(input_d=sample_d, few_shot_list=few_shot_data)
-                sample_d_new = sample_d.copy()
-                if sub_name in MULTIPLE_CHOICE_LIST:
-                    sample_d_new[TASK_TYPE] = MULTIPLE_CHOICE
-                elif sub_name in FREE_FORM_LIST:
-                    sample_d_new[TASK_TYPE] = FREE_FORM
-                else:
-                    raise ValueError(f'Invalid subset name: {sub_name}')
-
-                prompt_d[AnswerKeys.RAW_INPUT] = sample_d_new
-                res_dict[sub_name].append(prompt_d)
-
-        return res_dict
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        # Get the gold choice
-        gold = input_d.get('target', '')
-        # remove brackets
-        if gold is None:
-            logger.error(f'BBHAdapter: gold is None.')
-        gold = gold.replace('(', '').replace(')', '')
-        return gold
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
-        """
-        Parse the model output to get the answer. Could be the best choice index.
-
-        Args:
-            result: Predicted answer from the model. Usually a string for chat.
-            raw_input_d (dict): The raw input. Depending on the dataset.
-            eval_type: 'checkpoint' or 'service' or `custom`, default: 'checkpoint'
-
-        Returns:
-            The parsed answer. Depending on the dataset. Usually a string for chat.
-        """
-        # Note: to use same extraction method for both of checkpoint/service/custom.
-        task_type: str = raw_input_d.get(TASK_TYPE)
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        input = record['input']
+        target = record['target'].replace('(', '').replace(')', '').strip() # Clean up the target answer
+
+        # Determine task type based on subset name
+        task_type = None
+        subset_name = self.current_subset_name
+        if subset_name in MULTIPLE_CHOICE_LIST:
+            task_type = MULTIPLE_CHOICE
+        elif subset_name in FREE_FORM_LIST:
+            task_type = FREE_FORM
+
+        metadata = {TASK_TYPE: task_type}
+
+        return Sample(input=input, target=target, metadata=metadata, subset_key=subset_name)
+
+    def format_fewshot_template(self, fewshot: str, sample: Sample) -> str:
+        # Load CoT prompts from file for BBH
+        subset_name = sample.subset_key
+        if subset_name:
+            cot_file_path = os.path.join(os.path.dirname(__file__), 'cot_prompts', f'{subset_name}.txt')
+            if os.path.exists(cot_file_path):
+                with open(cot_file_path, 'r', encoding='utf-8') as f:
+                    fewshot = f.read().strip()
+        return self.few_shot_prompt_template.format(
+            fewshot=fewshot,
+            question=sample.input,
+        )
+
+    def extract_answer(self, prediction: str, task_state: TaskState):
+        task_type = task_state.metadata.get(TASK_TYPE)

         if task_type == MULTIPLE_CHOICE:
-            return self._extract_mc_answer(
+            return self._extract_mc_answer(prediction)
         elif task_type == FREE_FORM:
-            return self._extract_ff_answer(
+            return self._extract_ff_answer(prediction)
         else:
-
-
-    def match(self, gold: str, pred: str) -> float:
-        return exact_match(gold=gold, pred=pred)
+            return prediction.strip()

     @classmethod
     def _extract_mc_answer(cls, ans: str) -> str: