evalscope 0.10.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- evalscope/__init__.py +4 -1
- evalscope/api/benchmark/__init__.py +11 -0
- evalscope/api/benchmark/adapters/__init__.py +7 -0
- evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +754 -0
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +86 -0
- evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +157 -0
- evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- evalscope/api/benchmark/benchmark.py +404 -0
- evalscope/api/benchmark/meta.py +124 -0
- evalscope/api/dataset/__init__.py +2 -0
- evalscope/api/dataset/dataset.py +370 -0
- evalscope/api/dataset/loader.py +266 -0
- evalscope/api/dataset/utils.py +143 -0
- evalscope/api/evaluator/__init__.py +3 -0
- evalscope/api/evaluator/cache.py +382 -0
- evalscope/api/evaluator/evaluator.py +61 -0
- evalscope/api/evaluator/state.py +280 -0
- evalscope/api/filter/__init__.py +1 -0
- evalscope/api/filter/filter.py +72 -0
- evalscope/api/messages/__init__.py +12 -0
- evalscope/api/messages/chat_message.py +248 -0
- evalscope/api/messages/content.py +102 -0
- evalscope/api/messages/utils.py +35 -0
- evalscope/api/metric/__init__.py +2 -0
- evalscope/api/metric/metric.py +60 -0
- evalscope/api/metric/scorer.py +113 -0
- evalscope/api/mixin/__init__.py +2 -0
- evalscope/api/mixin/llm_judge_mixin.py +170 -0
- evalscope/api/mixin/sandbox_mixin.py +182 -0
- evalscope/api/model/__init__.py +12 -0
- evalscope/api/model/generate_config.py +161 -0
- evalscope/api/model/model.py +386 -0
- evalscope/api/model/model_output.py +285 -0
- evalscope/api/registry.py +182 -0
- evalscope/api/tool/__init__.py +3 -0
- evalscope/api/tool/tool_call.py +101 -0
- evalscope/api/tool/tool_info.py +173 -0
- evalscope/api/tool/utils.py +64 -0
- evalscope/app/__init__.py +28 -0
- evalscope/app/app.py +38 -0
- evalscope/app/arguments.py +11 -0
- evalscope/app/constants.py +22 -0
- evalscope/app/ui/__init__.py +20 -0
- evalscope/app/ui/app_ui.py +53 -0
- evalscope/app/ui/multi_model.py +353 -0
- evalscope/app/ui/sidebar.py +42 -0
- evalscope/app/ui/single_model.py +220 -0
- evalscope/app/ui/visualization.py +36 -0
- evalscope/app/utils/data_utils.py +195 -0
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/localization.py +221 -0
- evalscope/app/utils/text_utils.py +119 -0
- evalscope/app/utils/visualization.py +96 -0
- evalscope/arguments.py +32 -9
- evalscope/backend/opencompass/api_meta_template.py +2 -1
- evalscope/backend/opencompass/backend_manager.py +10 -7
- evalscope/backend/rag_eval/__init__.py +1 -1
- evalscope/backend/rag_eval/backend_manager.py +23 -6
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +33 -21
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- evalscope/backend/rag_eval/cmteb/arguments.py +14 -1
- evalscope/backend/rag_eval/cmteb/task_template.py +19 -3
- evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +1 -1
- evalscope/backend/rag_eval/ragas/arguments.py +0 -1
- evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +9 -3
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -6
- evalscope/backend/rag_eval/utils/embedding.py +125 -32
- evalscope/backend/rag_eval/utils/llm.py +16 -16
- evalscope/backend/vlm_eval_kit/backend_manager.py +8 -3
- evalscope/benchmarks/__init__.py +17 -5
- evalscope/benchmarks/aa_lcr/__init__.py +0 -0
- evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
- evalscope/benchmarks/ai2d/__init__.py +0 -0
- evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
- evalscope/benchmarks/aime/__init__.py +0 -0
- evalscope/benchmarks/aime/aime24_adapter.py +55 -0
- evalscope/benchmarks/aime/aime25_adapter.py +181 -0
- evalscope/benchmarks/aime/grader.py +307 -0
- evalscope/{metrics/math_accuracy.py → benchmarks/aime/math_normalize.py} +61 -72
- evalscope/benchmarks/alpaca_eval/__init__.py +0 -0
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +133 -0
- evalscope/benchmarks/amc/__init__.py +0 -0
- evalscope/benchmarks/amc/amc_adapter.py +51 -0
- evalscope/benchmarks/arc/arc_adapter.py +34 -149
- evalscope/benchmarks/arena_hard/__init__.py +0 -0
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +149 -0
- evalscope/benchmarks/arena_hard/utils.py +186 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +117 -157
- evalscope/benchmarks/bfcl/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v3/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v3/bfcl_v3_adapter.py +370 -0
- evalscope/benchmarks/bfcl/v3/generation.py +222 -0
- evalscope/benchmarks/bfcl/v3/utils.py +23 -0
- evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
- evalscope/benchmarks/bfcl/v4/utils.py +410 -0
- evalscope/benchmarks/biomix_qa/__init__.py +0 -0
- evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
- evalscope/benchmarks/blink/__init__.py +0 -0
- evalscope/benchmarks/blink/blink_adapter.py +61 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +93 -174
- evalscope/benchmarks/chartqa/__init__.py +0 -0
- evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
- evalscope/benchmarks/chartqa/utils.py +38 -0
- evalscope/benchmarks/chinese_simple_qa/__init__.py +0 -0
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +170 -0
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -140
- evalscope/benchmarks/coin_flip/__init__.py +0 -0
- evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
- evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
- evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
- evalscope/benchmarks/competition_math/competition_math_adapter.py +64 -112
- evalscope/benchmarks/data_collection/__init__.py +0 -0
- evalscope/benchmarks/data_collection/data_collection_adapter.py +215 -0
- evalscope/benchmarks/docmath/__init__.py +0 -0
- evalscope/benchmarks/docmath/docmath_adapter.py +143 -0
- evalscope/benchmarks/docmath/utils.py +219 -0
- evalscope/benchmarks/docvqa/__init__.py +0 -0
- evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
- evalscope/benchmarks/drivelology/__init__.py +0 -0
- evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
- evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
- evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
- evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
- evalscope/benchmarks/drop/__init__.py +0 -0
- evalscope/benchmarks/drop/drop_adapter.py +155 -0
- evalscope/benchmarks/drop/utils.py +156 -0
- evalscope/benchmarks/frames/__init__.py +0 -0
- evalscope/benchmarks/frames/frames_adapter.py +175 -0
- evalscope/benchmarks/frames/utils.py +37 -0
- evalscope/benchmarks/general_arena/__init__.py +0 -0
- evalscope/benchmarks/general_arena/general_arena_adapter.py +454 -0
- evalscope/benchmarks/general_arena/utils.py +223 -0
- evalscope/benchmarks/general_mcq/__init__.py +0 -0
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +58 -0
- evalscope/benchmarks/general_qa/general_qa_adapter.py +75 -107
- evalscope/benchmarks/gpqa/__init__.py +0 -0
- evalscope/benchmarks/gpqa/gpqa_adapter.py +90 -0
- evalscope/benchmarks/gpqa/prompt.py +88 -0
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +77 -144
- evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
- evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
- evalscope/benchmarks/halu_eval/__init__.py +0 -0
- evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
- evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
- evalscope/benchmarks/healthbench/__init__.py +0 -0
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +36 -134
- evalscope/benchmarks/hle/__init__.py +0 -0
- evalscope/benchmarks/hle/hle_adapter.py +153 -0
- evalscope/benchmarks/humaneval/humaneval_adapter.py +80 -88
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/ifeval/ifeval_adapter.py +71 -45
- evalscope/benchmarks/ifeval/instructions.py +112 -68
- evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope/benchmarks/image_edit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/infovqa/__init__.py +0 -0
- evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
- evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -58
- evalscope/benchmarks/live_code_bench/__init__.py +0 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +195 -0
- evalscope/benchmarks/live_code_bench/extract_utils.py +70 -0
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +150 -0
- evalscope/benchmarks/live_code_bench/load_utils.py +63 -0
- evalscope/benchmarks/live_code_bench/pass_k_utils.py +56 -0
- evalscope/benchmarks/live_code_bench/prompts.py +207 -0
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/live_code_bench/testing_util.py +544 -0
- evalscope/benchmarks/logi_qa/__int__.py +0 -0
- evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
- evalscope/benchmarks/maritime_bench/__init__.py +0 -0
- evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +56 -0
- evalscope/benchmarks/math_500/__init__.py +0 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +55 -0
- evalscope/benchmarks/math_qa/__init__.py +0 -0
- evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
- evalscope/benchmarks/math_verse/__init__.py +0 -0
- evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
- evalscope/benchmarks/math_vision/__init__.py +0 -0
- evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
- evalscope/benchmarks/med_mcqa/__init__.py +0 -0
- evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -210
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +87 -103
- evalscope/benchmarks/mmlu_redux/__init__.py +0 -0
- evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +139 -0
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
- evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/music_trivia/__init__.py +0 -0
- evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
- evalscope/benchmarks/musr/__init__.py +0 -0
- evalscope/benchmarks/musr/musr_adapter.py +43 -0
- evalscope/benchmarks/needle_haystack/__init__.py +0 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +389 -0
- evalscope/benchmarks/needle_haystack/utils.py +79 -0
- evalscope/benchmarks/ner/__init__.py +0 -0
- evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
- evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
- evalscope/benchmarks/ner/copious_adapter.py +85 -0
- evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
- evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
- evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
- evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
- evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
- evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
- evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
- evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
- evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
- evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
- evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
- evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
- evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
- evalscope/benchmarks/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
- evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
- evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
- evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
- evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
- evalscope/benchmarks/piqa/__init__.py +0 -0
- evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
- evalscope/benchmarks/poly_math/__init__.py +0 -0
- evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
- evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
- evalscope/benchmarks/pope/__init__.py +0 -0
- evalscope/benchmarks/pope/pope_adapter.py +112 -0
- evalscope/benchmarks/process_bench/__init__.py +0 -0
- evalscope/benchmarks/process_bench/process_bench_adapter.py +171 -0
- evalscope/benchmarks/pumed_qa/__init__.py +0 -0
- evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
- evalscope/benchmarks/qasc/__init__.py +0 -0
- evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
- evalscope/benchmarks/race/race_adapter.py +33 -120
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/sciq/__init__.py +0 -0
- evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
- evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
- evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
- evalscope/benchmarks/simple_qa/__init__.py +0 -0
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +169 -0
- evalscope/benchmarks/simple_vqa/__init__.py +0 -0
- evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
- evalscope/benchmarks/siqa/__init__.py +0 -0
- evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
- evalscope/benchmarks/super_gpqa/__init__.py +0 -0
- evalscope/benchmarks/super_gpqa/prompt.py +88 -0
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +165 -0
- evalscope/benchmarks/super_gpqa/utils.py +86 -0
- evalscope/benchmarks/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
- evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
- evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau_bench/generation.py +147 -0
- evalscope/benchmarks/tau_bench/tau_bench/tau_bench_adapter.py +168 -0
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
- evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
- evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
- evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
- evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
- evalscope/benchmarks/tool_bench/__init__.py +0 -0
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +102 -0
- evalscope/benchmarks/tool_bench/utils.py +203 -0
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -118
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -270
- evalscope/benchmarks/visu_logic/__init__.py +0 -0
- evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
- evalscope/benchmarks/winogrande/__init__.py +0 -0
- evalscope/benchmarks/winogrande/winogrande_adapter.py +34 -0
- evalscope/benchmarks/wmt/__init__.py +0 -0
- evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
- evalscope/benchmarks/zerobench/__init__.py +0 -0
- evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
- evalscope/cli/cli.py +2 -0
- evalscope/cli/start_app.py +12 -2
- evalscope/cli/start_eval.py +4 -3
- evalscope/cli/start_perf.py +10 -2
- evalscope/cli/start_server.py +6 -3
- evalscope/collections/__init__.py +27 -3
- evalscope/collections/sampler.py +12 -11
- evalscope/collections/schema.py +13 -12
- evalscope/config.py +218 -147
- evalscope/constants.py +78 -82
- evalscope/evaluator/__init__.py +1 -1
- evalscope/evaluator/evaluator.py +334 -318
- evalscope/filters/__init__.py +2 -0
- evalscope/filters/extraction.py +126 -0
- evalscope/filters/selection.py +57 -0
- evalscope/metrics/__init__.py +59 -3
- evalscope/metrics/bert_score/__init__.py +0 -0
- evalscope/metrics/bert_score/scorer.py +338 -0
- evalscope/metrics/bert_score/utils.py +697 -0
- evalscope/metrics/bundled_rouge_score/rouge_scorer.py +20 -15
- evalscope/metrics/llm_judge.py +211 -0
- evalscope/metrics/math_parser.py +545 -0
- evalscope/metrics/metric.py +611 -0
- evalscope/metrics/metrics.py +112 -23
- evalscope/metrics/rouge_metric.py +11 -13
- evalscope/metrics/t2v_metrics/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/clipscore.py +14 -0
- evalscope/metrics/t2v_metrics/constants.py +12 -0
- evalscope/metrics/t2v_metrics/itmscore.py +14 -0
- evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +30 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +6 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +134 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +282 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +115 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +87 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +86 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +62 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +85 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +99 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +176 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +82 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +74 -0
- evalscope/metrics/t2v_metrics/models/model.py +45 -0
- evalscope/metrics/t2v_metrics/models/utils.py +25 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +1 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +306 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +12 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +84 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +50 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +223 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +153 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +465 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +141 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +24 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +190 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +100 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +313 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +416 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +8 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +192 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +320 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +10 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +37 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +212 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +231 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +1111 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +211 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +109 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +457 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +370 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +765 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +274 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +896 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +1876 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +83 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +58 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +212 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +164 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +202 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +187 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +179 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +115 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +371 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +348 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +870 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +273 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +514 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +1291 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +476 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +35 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +27 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +233 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +393 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +129 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +18 -0
- evalscope/metrics/t2v_metrics/score.py +78 -0
- evalscope/metrics/t2v_metrics/vqascore.py +14 -0
- evalscope/models/__init__.py +23 -13
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/mockllm.py +65 -0
- evalscope/models/model_apis.py +69 -0
- evalscope/models/modelscope.py +455 -0
- evalscope/models/openai_compatible.py +144 -0
- evalscope/models/text2image_model.py +124 -0
- evalscope/models/utils/openai.py +708 -0
- evalscope/perf/__init__.py +0 -1
- evalscope/perf/arguments.py +103 -69
- evalscope/perf/benchmark.py +114 -163
- evalscope/perf/http_client.py +59 -89
- evalscope/perf/main.py +91 -18
- evalscope/perf/plugin/__init__.py +3 -2
- evalscope/perf/plugin/api/__init__.py +4 -3
- evalscope/perf/plugin/api/base.py +27 -7
- evalscope/perf/plugin/api/custom_api.py +170 -57
- evalscope/perf/plugin/api/dashscope_api.py +4 -10
- evalscope/perf/plugin/api/default_api.py +214 -0
- evalscope/perf/plugin/api/openai_api.py +120 -41
- evalscope/perf/plugin/datasets/__init__.py +10 -6
- evalscope/perf/plugin/datasets/base.py +43 -1
- evalscope/perf/plugin/datasets/custom.py +22 -3
- evalscope/perf/plugin/datasets/flickr8k.py +5 -27
- evalscope/perf/plugin/datasets/kontext_bench.py +28 -0
- evalscope/perf/plugin/datasets/line_by_line.py +7 -3
- evalscope/perf/plugin/datasets/longalpaca.py +7 -3
- evalscope/perf/plugin/datasets/openqa.py +13 -14
- evalscope/perf/plugin/datasets/random_dataset.py +67 -0
- evalscope/perf/plugin/datasets/random_vl_dataset.py +80 -0
- evalscope/perf/plugin/datasets/speed_benchmark.py +11 -0
- evalscope/perf/plugin/registry.py +36 -16
- evalscope/perf/utils/analysis_result.py +24 -23
- evalscope/perf/utils/benchmark_util.py +95 -55
- evalscope/perf/utils/db_util.py +115 -78
- evalscope/perf/utils/local_server.py +12 -47
- evalscope/perf/utils/log_utils.py +63 -0
- evalscope/perf/utils/rich_display.py +192 -0
- evalscope/report/__init__.py +46 -3
- evalscope/report/combinator.py +143 -32
- evalscope/report/generator.py +74 -34
- evalscope/report/report.py +238 -0
- evalscope/run.py +71 -46
- evalscope/summarizer.py +5 -5
- evalscope/third_party/longbench_write/infer.py +1 -1
- evalscope/third_party/thinkbench/__init__.py +3 -0
- evalscope/third_party/thinkbench/eval.py +441 -0
- evalscope/third_party/thinkbench/infer.py +130 -0
- evalscope/third_party/thinkbench/resources/critique_template.txt +17 -0
- evalscope/third_party/thinkbench/resources/reformat_template.txt +31 -0
- evalscope/third_party/thinkbench/tools/__init__.py +0 -0
- evalscope/third_party/thinkbench/tools/llm.py +48 -0
- evalscope/third_party/thinkbench/tools/utils.py +13 -0
- evalscope/third_party/toolbench_static/llm/swift_infer.py +46 -20
- evalscope/third_party/toolbench_static/toolbench_static.py +2 -1
- evalscope/utils/__init__.py +82 -2
- evalscope/utils/argument_utils.py +64 -0
- evalscope/utils/chat_service.py +8 -6
- evalscope/utils/deprecation_utils.py +53 -0
- evalscope/utils/function_utils.py +266 -0
- evalscope/utils/import_utils.py +154 -0
- evalscope/utils/io_utils.py +336 -8
- evalscope/utils/json_schema.py +231 -0
- evalscope/utils/logger.py +121 -31
- evalscope/utils/model_utils.py +57 -1
- evalscope/utils/multi_choices.py +303 -0
- evalscope/utils/ner.py +377 -0
- evalscope/utils/url_utils.py +65 -0
- evalscope/version.py +2 -2
- evalscope-1.2.0.dist-info/METADATA +553 -0
- evalscope-1.2.0.dist-info/RECORD +628 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
- evalscope/backend/vlm_eval_kit/custom_dataset.py +0 -46
- evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope/benchmarks/benchmark.py +0 -76
- evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope/benchmarks/ceval/samples.jsonl +0 -1
- evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope/benchmarks/data_adapter.py +0 -291
- evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope/benchmarks/race/race.py +0 -104
- evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope/collections/evaluator.py +0 -198
- evalscope/evaluator/rating_eval.py +0 -157
- evalscope/evaluator/reviewer/__init__.py +0 -1
- evalscope/evaluator/reviewer/auto_reviewer.py +0 -391
- evalscope/metrics/code_metric.py +0 -98
- evalscope/metrics/named_metrics.py +0 -17
- evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +0 -58485
- evalscope/metrics/resources/gpt2-zhcn3-v4.json +0 -1
- evalscope/models/base_adapter.py +0 -52
- evalscope/models/chat_adapter.py +0 -138
- evalscope/models/choice_adapter.py +0 -211
- evalscope/models/custom/__init__.py +0 -3
- evalscope/models/custom/custom_model.py +0 -53
- evalscope/models/custom/dummy_model.py +0 -63
- evalscope/models/custom_adapter.py +0 -67
- evalscope/models/local_model.py +0 -74
- evalscope/models/model.py +0 -229
- evalscope/models/server_adapter.py +0 -111
- evalscope/registry/__init__.py +0 -1
- evalscope/registry/config/cfg_arena.yaml +0 -77
- evalscope/registry/config/cfg_arena_zhihu.yaml +0 -63
- evalscope/registry/config/cfg_pairwise_baseline.yaml +0 -83
- evalscope/registry/config/cfg_single.yaml +0 -78
- evalscope/registry/data/prompt_template/lmsys_v2.jsonl +0 -8
- evalscope/registry/data/prompt_template/prompt_templates.jsonl +0 -8
- evalscope/registry/data/qa_browser/battle.jsonl +0 -634
- evalscope/registry/data/qa_browser/category_mapping.yaml +0 -10
- evalscope/registry/data/question.jsonl +0 -80
- evalscope/registry/tasks/arc.yaml +0 -28
- evalscope/registry/tasks/bbh.yaml +0 -26
- evalscope/registry/tasks/bbh_mini.yaml +0 -26
- evalscope/registry/tasks/ceval.yaml +0 -27
- evalscope/registry/tasks/ceval_mini.yaml +0 -26
- evalscope/registry/tasks/cmmlu.yaml +0 -27
- evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -28
- evalscope/registry/tasks/general_qa.yaml +0 -27
- evalscope/registry/tasks/gsm8k.yaml +0 -29
- evalscope/registry/tasks/mmlu.yaml +0 -29
- evalscope/registry/tasks/mmlu_mini.yaml +0 -27
- evalscope/report/app.py +0 -506
- evalscope/report/utils.py +0 -133
- evalscope/run_arena.py +0 -202
- evalscope/utils/arena_utils.py +0 -217
- evalscope/utils/completion_parsers.py +0 -82
- evalscope/utils/utils.py +0 -301
- evalscope-0.10.0.dist-info/METADATA +0 -565
- evalscope-0.10.0.dist-info/RECORD +0 -286
- tests/__init__.py +0 -1
- tests/cli/__init__.py +0 -1
- tests/cli/test_collection.py +0 -57
- tests/cli/test_run.py +0 -165
- tests/perf/__init__.py +0 -1
- tests/perf/test_perf.py +0 -101
- tests/rag/test_clip_benchmark.py +0 -85
- tests/rag/test_mteb.py +0 -138
- tests/rag/test_ragas.py +0 -120
- tests/swift/__init__.py +0 -1
- tests/swift/test_run_swift_eval.py +0 -145
- tests/swift/test_run_swift_vlm_eval.py +0 -127
- tests/swift/test_run_swift_vlm_jugde_eval.py +0 -156
- tests/test_run_all.py +0 -12
- tests/vlm/__init__.py +0 -1
- tests/vlm/test_vlmeval.py +0 -60
- {tests/rag → evalscope/api}/__init__.py +0 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
evalscope/benchmarks/hle/hle_adapter.py (new file, @@ -0,0 +1,153 @@):

```python
import re
from typing import Any, Dict, List

from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
from evalscope.api.dataset import Sample
from evalscope.api.evaluator import TaskState
from evalscope.api.messages import ChatMessage, ChatMessageSystem, ChatMessageUser, Content, ContentImage, ContentText
from evalscope.api.metric import Score
from evalscope.api.registry import register_benchmark
from evalscope.constants import Tags
from evalscope.utils.logger import get_logger

# flake8: noqa

logger = get_logger()

SUBSET_LIST = [
    'Biology/Medicine',
    'Chemistry',
    'Computer Science/AI',
    'Engineering',
    'Humanities/Social Science',
    'Math',
    'Physics',
    'Other',
]

ANSWER_TYPE_EXACT_MATCH = 'exactMatch'
ANSWER_TYPE_MULTIPLE_CHOICE = 'multipleChoice'

# System prompt constants
SYSTEM_EXACT_ANSWER = 'Your response should be in the following format:\nExplanation: {your explanation for your final answer}\nExact Answer: {your succinct, final answer}\nConfidence: {your confidence score between 0% and 100% for your answer}'

SYSTEM_MC = 'Your response should be in the following format:\nExplanation: {your explanation for your answer choice}\nAnswer: {your chosen answer}\nConfidence: {your confidence score between 0% and 100% for your answer}'

JUDGE_PROMPT = """Judge whether the following [response] to [question] is correct or not based on the precise and unambiguous [correct_answer] below.

[question]: {question}

[response]: {response}

[correct_answer]: {correct_answer}

Your judgment must focus only on if there are meaningful differences between [correct_answer] and the [response]. Do not comment on any background to the problem, do not attempt to solve the problem, do not argue for any answer different than [correct_answer], focus only on whether the answers match. Explain why the [response] is correct or incorrect based on [correct_answer] in one or two sentences. Finally, write your answer in the format 'GRADE: C' for correct answer or 'GRADE: I' for incorrect answer.
"""


@register_benchmark(
    BenchmarkMeta(
        name='hle',
        pretty_name="Humanity's-Last-Exam",
        tags=[Tags.KNOWLEDGE, Tags.QA],
        description='Humanity\'s Last Exam (HLE) is a language model benchmark consisting of 2,500 '
        'questions across a broad range of subjects. It was created jointly by the Center '
        'for AI Safety and Scale AI. The benchmark classifies the questions into the '
        'following broad subjects: mathematics (41%), physics (9%), biology/medicine (11%), '
        'humanities/social science (9%), computer science/artificial intelligence (10%), '
        'engineering (4%), chemistry (7%), and other (9%). Around 14% of the questions '
        'require the ability to understand both text and images, i.e., multi-modality. '
        '24% of the questions are multiple-choice; the rest are short-answer, exact-match questions. \n'
        '**To evaluate the performance of model without multi-modality capabilities, '
        'please set the `extra_params["include_multi_modal"]` to `False`.**',  # noqa: E501
        dataset_id='cais/hle',
        subset_list=SUBSET_LIST,
        metric_list=['acc'],
        eval_split='test',
        prompt_template='{question}',
        extra_params={'include_multi_modal': True}
    )
)
class HLEAdapter(DefaultDataAdapter):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._use_llm_judge = True  # Use LLM as a judge by default
        self.reformat_subset = True
        self.include_multi_modal = self.extra_params.get('include_multi_modal', True)

    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
        answer_type = record['answer_type']
        system_prompt = (SYSTEM_EXACT_ANSWER if answer_type == ANSWER_TYPE_EXACT_MATCH else SYSTEM_MC)
        text_content = ContentText(text=record['question'])

        content: List[Content] = [text_content]
        if record['image']:
            image_content = ContentImage(image=record['image'])
            content.append(image_content)

        messages: List[ChatMessage] = [
            ChatMessageSystem(content=system_prompt),
            ChatMessageUser(content=content),
        ]
        return Sample(
            input=messages,
            subset_key=record['category'],
            metadata={
                'uid': record['id'],
                'author_name': record['author_name'],
                'rationale': record['rationale'],
                'raw_subject': record['raw_subject'],
                'category': record['category'],
                'has_image': bool(record['image']),
            },
            target=record['answer'],
        )

    def sample_filter(self, sample):
        if not self.include_multi_modal:
            if sample.metadata is not None and sample.metadata['has_image']:
                return False
        return True

    def llm_match_score(
        self,
        original_prediction: str,
        filtered_prediction: str,
        reference: str,
        task_state: TaskState,
    ) -> Score:
        score = Score(
            extracted_prediction=filtered_prediction,
            prediction=original_prediction,
        )

        confidence = 100
        if task_state.output and task_state.output.completion:
            confidence_match = re.search(r'confidence:\s*(\d+)', task_state.output.completion, re.IGNORECASE)
            if confidence_match:
                confidence = int(confidence_match.group(1))

        judge_prompt = JUDGE_PROMPT.format(
            question=task_state.input_text, response=filtered_prediction, correct_answer=reference
        )

        # Request judge and obtain score
        judge_response = self.llm_judge.judge(prompt=judge_prompt)

        # Parse judge response to get accuracy score
        accuracy_score = re.search(r'GRADE:\s*([CI])', judge_response, re.IGNORECASE)
        if accuracy_score:
            score.value = {
                'acc': 1.0 if accuracy_score.group(1) == 'C' else 0.0,
            }
        score.explanation = f'LLM judge: {judge_response}'
        score.metadata = {
            'source': 'llm_judge',
            'judge_strategy': self.judge_strategy,
            'model': self.llm_judge.model_id,
            'confidence': confidence,
        }
        score.main_score_name = 'acc'
        return score
```
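Once registered, the benchmark is selectable by its `name`. Below is a minimal, hypothetical invocation sketch using evalscope's documented `TaskConfig`/`run_task` entry points; the model identifier is a placeholder, and the `dataset_args` entry mirrors the `extra_params` switch described in the benchmark metadata above.

```python
# Hypothetical usage sketch (not part of the diff): run the new 'hle'
# benchmark text-only. 'my-model' is a placeholder; substitute your own
# model or API-served endpoint per the evalscope documentation.
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='my-model',
    datasets=['hle'],
    dataset_args={
        'hle': {
            # Skip the ~14% multi-modal questions, as the description
            # above recommends for models without image understanding.
            'extra_params': {'include_multi_modal': False},
        }
    },
)
run_task(task_cfg=task_cfg)
```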
evalscope/benchmarks/humaneval/humaneval_adapter.py (several removed lines are truncated in the source and reproduced as-is):

```diff
@@ -1,104 +1,96 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
+# flake8: noqa: E501
 import re
-
-
-from evalscope.
-from evalscope.
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.messages.chat_message import ChatMessageUser
+from evalscope.api.metric import Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
 
 logger = get_logger()
 
-# Example:
-# {"task_id": "HumanEval/0", "prompt": "from typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n    \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\n    given threshold.\n    >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n    False\n    >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n    True\n    \"\"\"\n", "entry_point": "has_close_elements", "canonical_solution": "    for idx, elem in enumerate(numbers):\n        for idx2, elem2 in enumerate(numbers):\n            if idx != idx2:\n                distance = abs(elem - elem2)\n                if distance < threshold:\n                    return True\n\n    return False\n", "test": "\n\nMETADATA = {\n    'author': 'jt',\n    'dataset': 'test'\n}\n\n\ndef check(candidate):\n    assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.3) == True\n    assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.05) == False\n    assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.95) == True\n    assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.8) == False\n    assert candidate([1.0, 2.0, 3.0, 4.0, 5.0, 2.0], 0.1) == True\n    assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 1.0) == True\n    assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 0.5) == False\n\n"}  # noqa
-
 
-@
-
-
-
-
-
-
-
-
+@register_benchmark(
+    BenchmarkMeta(
+        name='humaneval',
+        pretty_name='HumanEval',
+        tags=[Tags.CODING],
+        description=
+        'HumanEval is a benchmark for evaluating the ability of code generation models to write Python functions based on given specifications. It consists of programming tasks with a defined input-output behavior. '
+        '**By default the code is executed in local environment. We recommend using sandbox execution to safely run and evaluate the generated code, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/sandbox.html) for more details.**',  # noqa: E501
+        dataset_id='opencompass/humaneval',
+        subset_list=['openai_humaneval'],
+        aggregation='mean_and_pass_at_k',
+        eval_split='test',
+        prompt_template=
+        'Read the following function signature and docstring, and fully implement the function described. Your response should only contain the code for this function.\n{question}',
+        review_timeout=4,
+    )
 )
-class HumanevalAdapter(
+class HumanevalAdapter(DefaultDataAdapter):
     """
-
+    HumanEval adapter using the new data processing framework.
     """
 
-    def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
-        data_dict = {}
-        for subset_name in subset_list:
-            data_dict[subset_name] = {}
-            # [{'task_id': '', 'prompt': '', 'entry_point': '', 'canonical_solution': '', 'test': ''}, ...]
-            data_dict[subset_name][self.eval_split] = [task for task in self.read_problems_func(dataset_name_or_path)]
-
-        return data_dict
-
-    def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
-        """
-        Generate prompt for the model.
-
-        Args:
-            input_d (dict): The raw input. A single data format of the Humaneval:
-            {'task_id': '', 'prompt': '', 'entry_point': '', 'canonical_solution': '', 'test': ''}
-        """
-        full_prompt = input_d['prompt']
-        full_prompt = f'Complete the following python code:\n{full_prompt}' if self.prompt_template else full_prompt
-
-        return {'data': [full_prompt], 'system_prompt': self.prompt_template}
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        """Convert a data record to a Sample object."""
+        query = record['prompt']
+        full_prompt = self.prompt_template.format(question=query)
+
+        return Sample(
+            input=[ChatMessageUser(content=full_prompt)],
+            target=record['canonical_solution'],
+            metadata={
+                'task_id': record['task_id'],
+                'entry_point': record['entry_point'],
+                'prompt': record['prompt'],
+                'test': record['test'],
+            }
+        )
+
+    def extract_answer(self, prediction: str, task_state: TaskState) -> str:
+        """Extract code from the prediction."""
+        return self._postprocess(prediction)
 
     @classmethod
     def _postprocess(cls, text: str) -> str:
-
-
-
-
-        else:
-            text = blocks[0]  # fetch the first code block
-            if not text.startswith('\n'):  # in case starting with ```python
-                text = text[max(text.find('\n') + 1, 0):]
-        if text.strip().startswith('from') or text.strip().startswith('import'):
-            def_idx = text.find('def')
-            if def_idx != -1:
-                text = text[max(text.find('\n', def_idx) + 1, 0):]
-        text = text.split('\n\n')[0]
-        if text.strip().startswith('def'):
-            text = '\n'.join(text.split('\n')[1:])
-        if not text.startswith('    '):
-            if text.startswith(' '):
-                text = '    ' + text.lstrip()
-            else:
-                text = '\n'.join(['    ' + line for line in text.split('\n')])
+        """Extract code from markdown code blocks."""
+        blocks = re.findall(r'```\w*\n(.*?)```', text, re.DOTALL)
+        if len(blocks) >= 1:
+            text = blocks[0]
         return text
 
-    def
-
-
-
-
-
-
-
-
+    def match_score(
+        self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
+    ) -> Score:
+
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+        problem = task_state.metadata
+        completion = filtered_prediction
+
+        if not self.use_sandbox:
+            from .utils import check_correctness
+
+            # Execute the code and check correctness
+            res = check_correctness(problem=problem, completion=completion, timeout=self.review_timeout)
+            passed = res['passed']
+        else:
+            check_program = (
+                problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
+            )
+            res = self.execute_code_in_sandbox(code=check_program, timeout=self.review_timeout, language='python')
+            passed = res.get('status') == 'success'
+        # Set score values
+        score.value = {'acc': passed}
+        score.metadata = {'task_id': problem['task_id'], 'timeout': self.review_timeout, 'execution_result': res}
+        score.main_score_name = 'acc'
+
+        return score
```
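The new `_postprocess` hook reduces to the single fence-extraction regex shown in the diff. A standalone re-statement of that regex (not an import from the package) illustrates what it keeps:

```python
# Re-statement of the fence-stripping logic in HumanevalAdapter._postprocess.
import re

reply = "Here is the function:\n```python\n    return a + b\n```"
blocks = re.findall(r'```\w*\n(.*?)```', reply, re.DOTALL)
print(repr(blocks[0]))  # '    return a + b\n' -> appended after the function signature
```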
evalscope/benchmarks/humaneval/utils.py (new file, @@ -0,0 +1,235 @@):

```python
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
from typing import Dict, Optional


def unsafe_execute(problem: Dict, completion: str, timeout: float, result):
    with create_tempdir():

        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Construct the check program and run it.
        check_program = (
            problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
        )

        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    # WARNING
                    # This program exists to execute untrusted model-generated code. Although
                    # it is highly unlikely that model-generated code will do something overtly
                    # malicious in response to this test suite, model-generated code may act
                    # destructively due to a lack of model capability or alignment.
                    # Users are strongly encouraged to sandbox this evaluation suite so that it
                    # does not perform destructive actions on their host or network. For more
                    # information on how OpenAI sandboxes its code, see the accompanying paper.
                    # Once you have read this disclaimer and taken appropriate precautions,
                    # uncomment the following line and proceed at your own risk:
                    exec(check_program, exec_globals)
            result.append('passed')
        except TimeoutException:
            result.append('timed out')
        except BaseException as e:
            result.append(f'failed: {e}')

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


def check_correctness(problem: Dict, completion: str, timeout: float, completion_id: Optional[int] = None) -> Dict:
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.

    :param completion_id: an optional completion ID so we can match
        the results later even if execution finishes asynchronously.
    """

    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(problem, completion, timeout, result))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append('timed out')

    return dict(
        task_id=problem['task_id'],
        passed=result[0] == 'passed',
        result=result[0],
        completion_id=completion_id,
    )


@contextlib.contextmanager
def time_limit(seconds: float):

    def signal_handler(signum, frame):
        raise TimeoutException('Timed out!')

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from"""

    def read(self, *args, **kwargs):
        raise IOError

    def readline(self, *args, **kwargs):
        raise IOError

    def readlines(self, *args, **kwargs):
        raise IOError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = 'stdin'


@contextlib.contextmanager
def chdir(root):
    if root == '.':
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes: Optional[int] = None):
    """
    This disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.)

    WARNING
    This function is NOT a security sandbox. Untrusted code, including, model-
    generated code, should not be blindly executed outside of one. See the
    Codex paper for more information about OpenAI's code sandbox, and proceed
    with caution.
    """

    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == 'Darwin':
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ['OMP_NUM_THREADS'] = '1'

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__['help'] = None

    import sys

    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
```

A minimal smoke test of `check_correctness` follows (not part of the diff). The toy problem dict mirrors the fields the functions above read (`prompt`, `test`, `entry_point`, `task_id`). It executes model output via `exec()` in a child process, so run it only where you are willing to sandbox; `time_limit` relies on `signal.setitimer`, which is Unix-only.

```python
# Toy invocation sketch; the import path matches the file location in this diff.
from evalscope.benchmarks.humaneval.utils import check_correctness

if __name__ == '__main__':
    problem = {
        'task_id': 'demo/0',
        'prompt': 'def add(a, b):\n',
        'entry_point': 'add',
        'test': 'def check(candidate):\n    assert candidate(1, 2) == 3\n',
    }
    completion = '    return a + b\n'  # stand-in for a model completion
    res = check_correctness(problem=problem, completion=completion, timeout=3.0)
    print(res)  # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': None}
```