evalscope 0.17.1__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Note: this version of evalscope has been flagged as a potentially problematic release.
- evalscope/__init__.py +4 -1
- evalscope/api/benchmark/__init__.py +3 -0
- evalscope/api/benchmark/adapters/__init__.py +5 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +684 -0
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +156 -0
- evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
- evalscope/api/benchmark/benchmark.py +356 -0
- evalscope/api/benchmark/meta.py +121 -0
- evalscope/api/dataset/__init__.py +2 -0
- evalscope/api/dataset/dataset.py +349 -0
- evalscope/api/dataset/loader.py +262 -0
- evalscope/api/dataset/utils.py +143 -0
- evalscope/api/evaluator/__init__.py +3 -0
- evalscope/api/evaluator/cache.py +378 -0
- evalscope/api/evaluator/evaluator.py +56 -0
- evalscope/api/evaluator/state.py +275 -0
- evalscope/api/filter/__init__.py +1 -0
- evalscope/api/filter/filter.py +72 -0
- evalscope/api/messages/__init__.py +12 -0
- evalscope/api/messages/chat_message.py +243 -0
- evalscope/api/messages/content.py +102 -0
- evalscope/api/messages/utils.py +35 -0
- evalscope/api/metric/__init__.py +2 -0
- evalscope/api/metric/metric.py +55 -0
- evalscope/api/metric/scorer.py +113 -0
- evalscope/api/mixin/__init__.py +1 -0
- evalscope/api/mixin/llm_judge_mixin.py +168 -0
- evalscope/api/model/__init__.py +12 -0
- evalscope/api/model/generate_config.py +155 -0
- evalscope/api/model/model.py +386 -0
- evalscope/api/model/model_output.py +285 -0
- evalscope/api/registry.py +182 -0
- evalscope/api/tool/__init__.py +3 -0
- evalscope/api/tool/tool_call.py +101 -0
- evalscope/api/tool/tool_info.py +173 -0
- evalscope/api/tool/utils.py +64 -0
- evalscope/app/app.py +3 -0
- evalscope/app/ui/app_ui.py +2 -1
- evalscope/app/ui/multi_model.py +50 -25
- evalscope/app/ui/single_model.py +26 -14
- evalscope/app/utils/data_utils.py +43 -27
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/text_utils.py +14 -14
- evalscope/app/utils/visualization.py +9 -4
- evalscope/arguments.py +7 -10
- evalscope/backend/opencompass/api_meta_template.py +2 -1
- evalscope/backend/opencompass/backend_manager.py +6 -5
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
- evalscope/backend/rag_eval/utils/embedding.py +10 -1
- evalscope/backend/rag_eval/utils/llm.py +13 -12
- evalscope/benchmarks/__init__.py +0 -2
- evalscope/benchmarks/aime/aime24_adapter.py +38 -40
- evalscope/benchmarks/aime/aime25_adapter.py +34 -40
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +86 -60
- evalscope/benchmarks/arc/arc_adapter.py +34 -147
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70
- evalscope/benchmarks/arena_hard/utils.py +37 -1
- evalscope/benchmarks/bbh/bbh_adapter.py +72 -144
- evalscope/benchmarks/bfcl/bfcl_adapter.py +188 -171
- evalscope/benchmarks/bfcl/generation.py +222 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +93 -162
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -125
- evalscope/benchmarks/competition_math/competition_math_adapter.py +56 -108
- evalscope/benchmarks/data_collection/data_collection_adapter.py +187 -45
- evalscope/benchmarks/docmath/docmath_adapter.py +109 -51
- evalscope/benchmarks/docmath/utils.py +4 -5
- evalscope/benchmarks/drop/drop_adapter.py +88 -40
- evalscope/benchmarks/frames/frames_adapter.py +136 -52
- evalscope/benchmarks/general_arena/general_arena_adapter.py +140 -98
- evalscope/benchmarks/general_arena/utils.py +23 -27
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +40 -101
- evalscope/benchmarks/general_qa/general_qa_adapter.py +73 -134
- evalscope/benchmarks/gpqa/gpqa_adapter.py +61 -100
- evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py} +12 -5
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +62 -142
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +35 -124
- evalscope/benchmarks/hle/hle_adapter.py +127 -93
- evalscope/benchmarks/humaneval/humaneval_adapter.py +86 -55
- evalscope/benchmarks/ifeval/ifeval_adapter.py +69 -40
- evalscope/benchmarks/ifeval/instructions.py +109 -64
- evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -65
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +121 -71
- evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
- evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
- evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +49 -75
- evalscope/benchmarks/math_500/math_500_adapter.py +41 -48
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
- evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -205
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +80 -99
- evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +64 -110
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
- evalscope/benchmarks/musr/musr_adapter.py +33 -64
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +196 -152
- evalscope/benchmarks/process_bench/process_bench_adapter.py +144 -76
- evalscope/benchmarks/race/race_adapter.py +33 -119
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
- evalscope/benchmarks/super_gpqa/{five_shot_prompt.txt → prompt.py} +14 -16
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +73 -117
- evalscope/benchmarks/super_gpqa/utils.py +2 -1
- evalscope/benchmarks/tau_bench/generation.py +147 -0
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +114 -60
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
- evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
- evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
- evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
- evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +91 -70
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -124
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -266
- evalscope/benchmarks/winogrande/winogrande_adapter.py +28 -54
- evalscope/cli/cli.py +2 -0
- evalscope/cli/start_app.py +7 -1
- evalscope/cli/start_perf.py +7 -1
- evalscope/cli/start_server.py +6 -3
- evalscope/collections/__init__.py +2 -10
- evalscope/collections/sampler.py +10 -10
- evalscope/collections/schema.py +13 -11
- evalscope/config.py +157 -57
- evalscope/constants.py +37 -61
- evalscope/evaluator/__init__.py +1 -1
- evalscope/evaluator/evaluator.py +275 -419
- evalscope/filters/__init__.py +2 -0
- evalscope/filters/extraction.py +126 -0
- evalscope/filters/selection.py +57 -0
- evalscope/metrics/__init__.py +13 -13
- evalscope/metrics/llm_judge.py +47 -33
- evalscope/metrics/math_parser.py +27 -22
- evalscope/metrics/metric.py +307 -0
- evalscope/metrics/metrics.py +22 -18
- evalscope/metrics/t2v_metrics/__init__.py +0 -52
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
- evalscope/models/__init__.py +6 -29
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/mockllm.py +65 -0
- evalscope/models/model_apis.py +67 -0
- evalscope/models/modelscope.py +455 -0
- evalscope/models/openai_compatible.py +126 -0
- evalscope/models/text2image_model.py +124 -0
- evalscope/models/utils/openai.py +701 -0
- evalscope/perf/benchmark.py +4 -1
- evalscope/perf/http_client.py +4 -2
- evalscope/perf/plugin/api/custom_api.py +5 -4
- evalscope/perf/plugin/api/openai_api.py +11 -9
- evalscope/perf/plugin/datasets/custom.py +2 -1
- evalscope/perf/plugin/datasets/flickr8k.py +1 -1
- evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
- evalscope/perf/plugin/datasets/line_by_line.py +2 -1
- evalscope/perf/plugin/datasets/longalpaca.py +2 -1
- evalscope/perf/plugin/datasets/openqa.py +4 -2
- evalscope/perf/utils/benchmark_util.py +15 -10
- evalscope/perf/utils/db_util.py +9 -6
- evalscope/perf/utils/local_server.py +11 -3
- evalscope/perf/utils/rich_display.py +16 -10
- evalscope/report/__init__.py +2 -3
- evalscope/report/combinator.py +18 -12
- evalscope/report/generator.py +51 -35
- evalscope/report/{utils.py → report.py} +8 -6
- evalscope/run.py +33 -47
- evalscope/summarizer.py +1 -1
- evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- evalscope/utils/__init__.py +21 -2
- evalscope/utils/chat_service.py +3 -2
- evalscope/utils/deprecation_utils.py +12 -1
- evalscope/utils/function_utils.py +29 -0
- evalscope/utils/import_utils.py +23 -1
- evalscope/utils/io_utils.py +142 -6
- evalscope/utils/json_schema.py +208 -0
- evalscope/utils/logger.py +51 -12
- evalscope/utils/model_utils.py +11 -7
- evalscope/utils/multi_choices.py +288 -0
- evalscope/utils/url_utils.py +65 -0
- evalscope/version.py +2 -2
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/METADATA +108 -62
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/RECORD +258 -226
- tests/benchmark/test_eval.py +385 -0
- tests/benchmark/test_image_edit.py +65 -0
- tests/{aigc → benchmark}/test_t2i.py +22 -4
- tests/benchmark/test_vlm.py +80 -0
- tests/cli/test_all.py +85 -47
- tests/cli/test_collection.py +20 -8
- tests/cli/test_custom.py +22 -15
- tests/cli/test_reasoning.py +81 -0
- tests/common.py +73 -0
- tests/perf/test_perf.py +4 -2
- tests/rag/test_clip_benchmark.py +0 -2
- evalscope/benchmarks/aigc/t2i/base.py +0 -56
- evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +0 -78
- evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +0 -58
- evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +0 -58
- evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +0 -57
- evalscope/benchmarks/aigc/t2i/tifa_adapter.py +0 -37
- evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope/benchmarks/benchmark.py +0 -81
- evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope/benchmarks/data_adapter.py +0 -528
- evalscope/benchmarks/filters.py +0 -59
- evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope/benchmarks/process_bench/critique_template.txt +0 -13
- evalscope/benchmarks/race/race.py +0 -104
- evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
- evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope/benchmarks/utils.py +0 -60
- evalscope/collections/evaluator.py +0 -375
- evalscope/metrics/completion_parsers.py +0 -227
- evalscope/metrics/named_metrics.py +0 -55
- evalscope/models/adapters/__init__.py +0 -14
- evalscope/models/adapters/base_adapter.py +0 -84
- evalscope/models/adapters/bfcl_adapter.py +0 -246
- evalscope/models/adapters/chat_adapter.py +0 -207
- evalscope/models/adapters/choice_adapter.py +0 -222
- evalscope/models/adapters/custom_adapter.py +0 -71
- evalscope/models/adapters/server_adapter.py +0 -236
- evalscope/models/adapters/t2i_adapter.py +0 -79
- evalscope/models/adapters/tau_bench_adapter.py +0 -189
- evalscope/models/custom/__init__.py +0 -4
- evalscope/models/custom/custom_model.py +0 -50
- evalscope/models/custom/dummy_model.py +0 -99
- evalscope/models/local_model.py +0 -128
- evalscope/models/register.py +0 -41
- tests/cli/test_run.py +0 -489
- /evalscope/{benchmarks/aigc → api}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/t2i → image_edit}/__init__.py +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/LICENSE +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/WHEEL +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/entry_points.txt +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/top_level.txt +0 -0
- /tests/{aigc → benchmark}/__init__.py +0 -0
evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py}

@@ -1,3 +1,5 @@
+# flake8: noqa
+FEW_SHOT_SAMPLES = """
 Question: In a given population, 1 out of every 400 people has a cancer caused by a completely recessive allele, b. Assuming the population is in Hardy-Weinberg equilibrium, which of the following is the expected proportion of individuals who carry the b allele but are not expected to develop the cancer?
 Choices:
 (A) 1/400
@@ -9,7 +11,8 @@ The expected proportion of individuals who carry the b allele but are not expect
 According to the Hardy-Weinberg equation p∧2 + 2pq + q∧2 = 1, where p is the frequency of dominant allele frequency, q is the frequency of recessive allele frequency, p∧2 is the frequency of the homozygous dominant allele, q∧2 is the frequency of the recessive allele, and 2pq is the frequency of the heterozygous allele.
 Given that q∧2=1/400, hence, q=0.05 and p=1-q=0.95.
 The frequency of the heterozygous allele is 2pq=2*0.05*0.95=38/400.
-
+ANSWER: D
+
 Question: A Fe pellet of 0.056 g is first dissolved in 10 mL of hydrobromic acid HBr (0.1 M). The resulting solution is then titrated by KMnO4 (0.02 M). How many equivalence points are there?
 Choices:
 (A) Two points, 25 ml and 35 ml
@@ -30,7 +33,8 @@ Reaction 1: MnO4- + 5Fe2+ + 8H+ → Mn2+ + 5Fe3+ + 4H2O
 Reaction 2: 2MnO4- + 10Br- + 16H+ → 2Mn2+ + 5Br2 + 8H2O
 So MnO4- will first react with Fe2+ with a stoichiometry of 1:5 so Veq1 will be 10 ml.
 Then when Fe2+ is used up, MnO4- will react with Br- with a stoichiometry of 2:10 then V added will be 25 ml so Veq2=25+10=35 ml.
-
+ANSWER: A
+
 Question: Consider a quantum mechanical system containing a particle of mass $m$ moving in an istropic three dimensional potential of the form $V(r) = 1/2 m \omega^2 r^2$ corresponding to the acted force obeying Hooke’s law. Here, $\omega$ is the angular frequency of oscillation and $r$ is the radial distance of the particle from the origin in spherical polar coordinate. What is the value of energy of the third excited state, and how many linearly independent eigenfunctions are possible for the same energy eigenvalue?
 Choices:
 (A) 11 \pi^2 \hbar^2 / (2m r^2), 3
@@ -45,7 +49,8 @@ For third excited state n=3.
 Thus the corresponding energy is $(9/2)\hbar \omega$.
 The degeneracy of the state is $g_n= (n+1)(n+2)/2$.
 For n=3, degeneracy is (3+1)*(3+2)/2=4*5/2=10.
-
+ANSWER: B
+
 Question: "Your overhear two chemists talking to each other as they leave a synthetic organic chemistry lab. One asks the other "So, how did it go?" The second chemist replies, "Not well - my compounds are on top of each other." What is the second chemist most likely referring to?"
 Choices:
 (A) The compounds they are working with have similar polarities.
@@ -55,7 +60,8 @@ Choices:
 Let's think step by step:
 "On top of each other" commonly refers to two compounds that have similar Rf values on chromatography (a common operation in synthetic chemistry).
 Similar Rf values arise for compounds with similar polarities.
-
+ANSWER: A
+
 Question: Two people are playing the following game. A fair coin is tossed into the air. Person A says that in a single toss of the coin, the tail will come. So it's like the first shot or the third shot or the fifth shot. Person B says that the coin will come with a double toss. So like the second, fourth, sixth or eighth shot. Imagine this game played forever. What is the probability that person A wins this game?
 Choices:
 (A) 1/2
@@ -78,4 +84,5 @@ The solution for this series is as follows : a1/(1-r) where a1 is the first numb
 a1=1/2
 r=(1/2)^2=1/4
 So a1/(1-r)=(1/2)/(1-1/4)=(1/2)/(3/4)=2/3.
-
+ANSWER: C
+"""
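The rewritten few-shot samples each end with a bare `ANSWER: <letter>` line, the same convention the new GSM8K prompt template asks the model to follow. A minimal, hypothetical sketch of pulling that letter back out of a completion (illustration only; this helper is not part of evalscope):

```python
import re
from typing import Optional


def parse_answer_letter(completion: str) -> Optional[str]:
    # Hypothetical helper: return the choice letter after the last 'ANSWER:' marker.
    matches = re.findall(r'ANSWER:\s*([A-D])', completion)
    return matches[-1] if matches else None


print(parse_answer_letter('The degeneracy is 10.\nANSWER: B'))  # -> B
```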
evalscope/benchmarks/gsm8k/gsm8k_adapter.py

@@ -1,156 +1,76 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
-
-
-
-import
-import
-
-from evalscope.
-from evalscope.
+
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
 
 logger = get_logger()
 
+PROMPT_TEMPLATE = """
+Solve the following math problem step by step. The last line of your response should be of the form "ANSWER: $ANSWER" (without quotes) where $ANSWER is the answer to the problem.
+
+{question}
+
+Remember to put your answer on its own line at the end in the form "ANSWER: $ANSWER" (without quotes) where $ANSWER is the answer to the problem, and you do not need to use a \\boxed command.
+
+Reasoning:
+""".lstrip() # noqa: E501
+
+FEWSHOT_TEMPLATE = """
+Here are some examples of how to solve similar problems:
+
+{fewshot}
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+""".lstrip() + PROMPT_TEMPLATE
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='gsm8k',
+        pretty_name='GSM8K',
+        dataset_id='AI-ModelScope/gsm8k',
+        tags=[Tags.MATH, Tags.REASONING],
+        description=
+        'GSM8K (Grade School Math 8K) is a dataset of grade school math problems, designed to evaluate the mathematical reasoning abilities of AI models.', # noqa: E501
+        subset_list=['main'],
+        few_shot_num=4,
+        train_split='train',
+        eval_split='test',
+        metric_list=['acc'],
+        prompt_template=PROMPT_TEMPLATE,
+        few_shot_prompt_template=FEWSHOT_TEMPLATE,
+    )
 )
-class GSM8KAdapter(
+class GSM8KAdapter(DefaultDataAdapter):
 
     def __init__(self, **kwargs):
-        """
-        Data adapter for GSM8K dataset.
-
-        Args:
-            subset_list (list): Subset list for the dataset. Default: ['main']
-            metric_list (list): Metric list for the dataset. Default: [{'name': 'AverageAccuracy', 'object': mean}]
-            few_shot_num (int): Number of few-shot examples. Default: 4
-            train_split (str): Train split name. Default: 'train'
-            eval_split (str): The target eval split name. Default: 'test'
-            **kwargs: ...
-        """
-        few_shot_num = kwargs.get('few_shot_num', 4)
-        if few_shot_num != 4 and few_shot_num != 0:
-            logger.error(f'GSM8K uses 4-shot examples with CoT or 0-shot by system, but got {few_shot_num}. '
-                         f'Use 4-shot by default.')
-            kwargs['few_shot_num'] = 4
-
         super().__init__(**kwargs)
 
-    def
-
-
-
-
-
-
-
-
-
-
-
-
-
-    def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
-        """
-        Generate prompt for the model.
-
-        Args:
-            input_d (dict): The raw input. A single data format of the GSM8K:
-            {
-                "question": "Janet\\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers\' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers\' market?",
-                "answer": "Janet sells 16 - 3 - 4 = <<16-3-4=9>>9 duck eggs a day.\\nShe makes 9 * 2 = $<<9*2=18>>18 every day at the farmer\\u2019s market.\\n#### 18"
-            }
-        """
-        use_fewshot = self.few_shot_num > 0
-        context = self._generate_prompt(use_fewshot=use_fewshot)
-
-        full_prompt = context + self.prompt_template.format(query=input_d['question'])
-
-        return self.gen_prompt_data(full_prompt)
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        # Extract the gold answer from the input dict.
-        ans: str = input_d.get('answer', '')
-        ans = self.extract_answer(ans).strip()
-        if not ans:
-            logger.error(f'No ground truth answer found in the input: {input_d}')
-        return ans
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
-        """
-        Parse the model output to get the answer. Could be the best choice index.
-
-        Args:
-            result: Predicted answer from the model. Usually a string for chat.
-            raw_input_d (dict): The raw input. Depending on the dataset.
-            eval_type (str): 'checkpoint' or 'service'
-
-        Returns:
-            The parsed answer. Depending on the dataset. Usually a string for chat.
-        """
-        # Note: to use same extraction method for both of checkpoint and custom.
-        return self.extract_answer(result)
-
-    def match(self, gold: str, pred: str) -> float:
-        """
-        Match the gold answer and predicted answer.
-
-        Args:
-            gold (str): The golden answer. Note: to be extracted.
-            pred (str): The extracted prediction. Usually a string for chat/multiple-choice-questions.
-                e.g. 'B'
-        """
-
-        def number_equal(gold_ans, pred_ans):
-            if pred_ans is None:
-                return False
-            try:
-                return math.isclose(eval(gold_ans), eval(pred_ans), rel_tol=0, abs_tol=1e-4)
-            except:
-                logger.warning(f'##report##Cannot compare two numbers: gold_ans={gold_ans}, pred_ans={pred_ans}')
-                return False
-
-        return number_equal(gold_ans=gold, pred_ans=pred)
-
-    @classmethod
-    def _generate_prompt(cls, use_fewshot: bool = True) -> str:
-        if use_fewshot:
-            # Use 4-shot examples by system
-            context = (
-                "Question: Angelo and Melanie want to plan how many hours over the next week they should study together for their test next week. They have 2 chapters of their textbook to study and 4 worksheets to memorize. They figure out that they should dedicate 3 hours to each chapter of their textbook and 1.5 hours for each worksheet. If they plan to study no more than 4 hours each day, how many days should they plan to study total over the next week if they take a 10-minute break every hour, include 3 10-minute snack breaks each day, and 30 minutes for lunch each day?\nLet's think step by step\n"
-                'Angelo and Melanie think they should dedicate 3 hours to each of the 2 chapters, 3 hours x 2 chapters = 6 hours total.\nFor the worksheets they plan to dedicate 1.5 hours for each worksheet, 1.5 hours x 4 worksheets = 6 hours total.\nAngelo and Melanie need to start with planning 12 hours to study, at 4 hours a day, 12 / 4 = 3 days.\nHowever, they need to include time for breaks and lunch. Every hour they want to include a 10-minute break, so 12 total hours x 10 minutes = 120 extra minutes for breaks.\nThey also want to include 3 10-minute snack breaks, 3 x 10 minutes = 30 minutes.\nAnd they want to include 30 minutes for lunch each day, so 120 minutes for breaks + 30 minutes for snack breaks + 30 minutes for lunch = 180 minutes, or 180 / 60 minutes per hour = 3 extra hours.\nSo Angelo and Melanie want to plan 12 hours to study + 3 hours of breaks = 15 hours total.\nThey want to study no more than 4 hours each day, 15 hours / 4 hours each day = 3.75\nThey will need to plan to study 4 days to allow for all the time they need.\nThe answer is 4\n\n'
-                "Question: Mark's basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What's the total number of points scored by both teams added together?\nLet's think step by step\n"
-                "Mark's team scores 25 2 pointers, meaning they scored 25*2= 50 points in 2 pointers.\nHis team also scores 6 3 pointers, meaning they scored 8*3= 24 points in 3 pointers\nThey scored 10 free throws, and free throws count as one point so they scored 10*1=10 points in free throws.\nAll together his team scored 50+24+10= 84 points\nMark's opponents scored double his team's number of 2 pointers, meaning they scored 50*2=100 points in 2 pointers.\nHis opponents scored half his team's number of 3 pointers, meaning they scored 24/2= 12 points in 3 pointers.\nThey also scored half Mark's team's points in free throws, meaning they scored 10/2=5 points in free throws.\nAll together Mark's opponents scored 100+12+5=117 points\nThe total score for the game is both team's scores added together, so it is 84+117=201 points\nThe answer is 201\n\n"
-                "Question: Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?\nLet's think step by step\n"
-                "When Bella buys 2/5 times more marbles, she'll have increased the number of marbles by 2/5*60 = 24\nThe total number of marbles she'll have is 60+24 = 84\nIf Bella currently has 60 marbles, and she has two times as many marbles as frisbees, she has 60/2 = 30 frisbees.\nIf Bella buys 2/5 times more frisbees, she'll have 2/5*30 = 12 more frisbees.\nThe total number of frisbees she'll have will increase to 30+12 = 42\nBella also has 20 more frisbees than deck cards, meaning she has 30-20 = 10 deck cards\nIf she buys 2/5 times more deck cards, she'll have 2/5*10 = 4 more deck cards.\nThe total number of deck cards she'll have is 10+4 = 14\nTogether, Bella will have a total of 14+42+84 = 140 items\nThe answer is 140\n\n"
-                "Question: A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?\nLet's think step by step\n"
-                'For the first three baskets, the number of apples and oranges in one basket is 9+15=24\nIn total, together with bananas, the number of fruits in one basket is 24+14=38 for the first three baskets.\nSince there are three baskets each having 38 fruits, there are 3*38=114 fruits in the first three baskets.\nThe number of apples in the fourth basket is 9-2=7\nThere are also 15-2=13 oranges in the fourth basket\nThe combined number of oranges and apples in the fourth basket is 13+7=20\nThe fourth basket also contains 14-2=12 bananas.\nIn total, the fourth basket has 20+12=32 fruits.\nThe four baskets together have 32+114=146 fruits.\nThe answer is 146\n\n'
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        DELIM = '####'
+        question = record['question']
+        answer = record['answer'].split(DELIM)
+        target = answer.pop().strip()
+        reasoning = DELIM.join(answer)
+
+        return Sample(input=question, target=target, metadata={'reasoning': reasoning.strip()})
+
+    def sample_to_fewshot(self, sample: Sample) -> str:
+        if sample.metadata:
+            return (
+                f'{sample.input}\n\nReasoning:\n' + f"{sample.metadata['reasoning']}\n\n" + f'ANSWER: {sample.target}'
             )
         else:
-
-
-
-
-    def extract_answer(s: str) -> str:
-        _PAT_LAST_DIGIT = re.compile(r'([+-])?(?=([0-9]|\.[0-9]))(0|([1-9](\d{0,2}(,\d{3})*)|\d*))?(\.\d*)?(?=\D|$)')
-        match = list(_PAT_LAST_DIGIT.finditer(s))
-        if match:
-            last_digit = match[-1].group().replace(',', '').replace('+', '').strip().strip('.')
-            # print(f"The last digit in {s} is {last_digit}")
-        else:
-            last_digit = None
-            print(f'No digits found in {s!r}', flush=True)
+            return ''
+
+    def extract_answer(self, prediction: str, task_state: TaskState):
+        from evalscope.filters.extraction import RegexFilter
 
-
+        regex = RegexFilter(regex_pattern=r'(-?[0-9.,]{2,})|(-?[0-9]+)', group_select=-1)
+        res = regex(prediction)
+        return res.replace(',', '').replace('+', '').strip().strip('.')
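The new adapter replaces the hand-rolled prompt building and regex answer extraction with `record_to_sample` / `sample_to_fewshot` hooks on `DefaultDataAdapter`. A small standalone sketch of what `record_to_sample` does with a raw GSM8K record, reimplemented here for illustration only (the real method returns an evalscope `Sample`; the record is the example quoted in the removed docstring, abbreviated):

```python
# Standalone illustration of the '####' split performed by the new record_to_sample.
record = {
    'question': "Janet's ducks lay 16 eggs per day. ...",
    'answer': ("Janet sells 16 - 3 - 4 = <<16-3-4=9>>9 duck eggs a day.\n"
               "She makes 9 * 2 = $<<9*2=18>>18 every day at the farmer's market.\n"
               "#### 18"),
}

DELIM = '####'
parts = record['answer'].split(DELIM)
target = parts.pop().strip()           # '18'           -> Sample.target
reasoning = DELIM.join(parts).strip()  # chain of thought -> Sample.metadata['reasoning']

print(target)
print(reasoning)
```

`sample_to_fewshot` then renders each training sample as `<question>\n\nReasoning:\n<reasoning>\n\nANSWER: <target>`, matching the "ANSWER: $ANSWER" convention that PROMPT_TEMPLATE asks the model to follow.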
evalscope/benchmarks/hellaswag/hellaswag_adapter.py

@@ -3,118 +3,56 @@ import numpy as np
 import os
 import re
 
-from evalscope.
-from evalscope.
-from evalscope.
-from evalscope.
-from evalscope.utils.io_utils import jsonl_to_list
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
+from evalscope.utils.multi_choices import MultipleChoiceTemplate
 
 # flake8: noqa
 
 logger = get_logger()
 
 
-@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+@register_benchmark(
+    BenchmarkMeta(
+        name='hellaswag',
+        pretty_name='HellaSwag',
+        tags=[Tags.COMMONSENSE, Tags.MULTIPLE_CHOICE, Tags.KNOWLEDGE],
+        description=
+        'HellaSwag is a benchmark for commonsense reasoning in natural language understanding tasks. It consists of multiple-choice questions where the model must select the most plausible continuation of a given context.',
+        dataset_id='evalscope/hellaswag',
+        metric_list=['acc'],
+        subset_list=['default'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='validation',
+        prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER,
+    )
 )
-class HellaSwagAdapter(
+class HellaSwagAdapter(MultiChoiceAdapter):
 
     def __init__(self, **kwargs):
-
-        few_shot_num = kwargs.get('few_shot_num', 0)
-        if few_shot_num != 0:
-            logger.warning(f'few_shot_num should be 0 for HellaSwag, but got {few_shot_num}. Use 0-shot by default.')
-            kwargs['few_shot_num'] = 0
-
         super().__init__(**kwargs)
-        self.choices = ['A', 'B', 'C', 'D']
-
-    def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
-        data_dict = {}
-        for subset_name in subset_list:
-            data_dict[subset_name] = {}
-            for split in [self.train_split, self.eval_split]:
-                if os.path.exists(dataset_name_or_path):
-                    file_path = os.path.join(dataset_name_or_path, f'hellaswag_{split}.jsonl')
-                else:
-                    file_path = os.path.join(work_dir, dataset_name_or_path, f'hellaswag_{split}.jsonl')
-                if os.path.exists(file_path):
-                    data_dict[subset_name][split] = jsonl_to_list(file_path)
-
-        return data_dict
-
-    def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
-        """
-        Generate model prompt from raw data, unify the prompt format for HellaSwag benchmark.
-
-        Args:
-            input_d (dict): The raw input. A single data format of the HellaSwag:
-
-            {
-                'ind': 4,
-                'activity_label': 'Removing ice from car',
-                'ctx_a': 'Then, the man writes over the snow covering the window of a car, and a woman wearing winter clothes smiles.',
-                'ctx_b': 'then',
-                'ctx': 'Then, the man writes over the snow covering the window of a car, and a woman wearing winter clothes smiles. then',
-                'endings': [', the man adds wax to the windshield and cuts it.', ', a person board a ski lift, while two men supporting the head of the person wearing winter clothes snow as the we girls sled.', ', the man puts on a christmas coat, knitted with netting.', ', the man continues removing the snow on his car.'],
-                'source_id': 'activitynet~v_-1IBHYS3L-Y',
-                'split': 'train',
-                'split_type': 'indomain',
-                'label': '3'
-            }
-
-        Returns:
-            Refer to function: evalscope.benchmarks.data_adapter.DataAdapter.gen_prompt for details.
-        """
-
-        endings: list = [self._preprocess(ending) for ending in input_d['endings']]
-
-        few_shot_prompts = [
-            self._generate_prompt(input_d=sample, endings=endings, include_answer=True) for sample in few_shot_list
-        ]
-        context: str = '\n'.join(few_shot_prompts) + '\n'
-        query = context.strip() + self._generate_prompt(input_d=input_d, endings=endings, include_answer=False)
 
-
-
+    def record_to_sample(self, record) -> Sample:
+        # Preprocess endings
+        endings = [self._preprocess(ending) for ending in record['endings']]
 
-
-
-
+        # Create context
+        ctx = record['ctx_a'] + ' ' + record['ctx_b'].capitalize()
+        context = self._preprocess(ctx)
 
-
-
-        Parse the model output to get the answer. Could be the best choice index.
+        # Get target choice letter
+        target_letter = ['A', 'B', 'C', 'D'][int(record['label'])]
 
-
-
-
-
-
-
-            The parsed answer. Depending on the dataset. Usually a string for chat.
-        """
-        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
-            return result
-        else:
-            return ResponseParser.parse_first_option(result, options=self.choices)
-
-    def match(self, gold: str, pred: str) -> float:
-        return exact_match(gold=str(gold), pred=str(pred))
+        return Sample(
+            input=context,
+            choices=endings,
+            target=target_letter,
+            metadata={'activity_label': record.get('activity_label', 'unknown')},
+        )
 
     def _preprocess(self, text):
         text = text.strip()
@@ -122,30 +60,3 @@ class HellaSwagAdapter(DataAdapter):
         text = re.sub('\\[.*?\\]', '', text)
         text = text.replace('  ', ' ')
         return text
-
-    def _generate_prompt(self, input_d: dict, endings: list, include_answer=True) -> str:
-        """
-        Generate prompt for HellaSwag dataset.
-
-        Args:
-            input_d: a single data of the hellaswag.
-            endings: preprocessed endings
-            include_answer: bool
-
-        Returns:
-
-        """
-
-        ctx = input_d['ctx_a'] + ' ' + input_d['ctx_b'].capitalize()
-        # example: str = cls._preprocess(input_d['activity_label'] + ': ' + ctx)
-        example: str = self._preprocess(ctx)
-
-        example += '\nQuestion: Which ending makes the most sense?'
-        for i, ending in enumerate(endings):
-            example += f'\n{self.choices[i]}. {ending}'
-        example += '\nYou may choose from A, B, C, D. Derive your final answer as `The answer is ...`.'
-
-        if include_answer:
-            example += '{}\n\n'.format(endings[int(input_d['label'])])
-
-        return example