evalscope 0.17.1__tar.gz → 1.0.1__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Note: this version of evalscope has been flagged as a potentially problematic release.
- {evalscope-0.17.1/evalscope.egg-info → evalscope-1.0.1}/PKG-INFO +87 -49
- {evalscope-0.17.1 → evalscope-1.0.1}/README.md +86 -48
- evalscope-1.0.1/evalscope/__init__.py +8 -0
- evalscope-1.0.1/evalscope/api/benchmark/__init__.py +3 -0
- evalscope-1.0.1/evalscope/api/benchmark/adapters/__init__.py +5 -0
- evalscope-1.0.1/evalscope/api/benchmark/adapters/default_data_adapter.py +684 -0
- evalscope-1.0.1/evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope-1.0.1/evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
- evalscope-1.0.1/evalscope/api/benchmark/adapters/text2image_adapter.py +156 -0
- evalscope-1.0.1/evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
- evalscope-1.0.1/evalscope/api/benchmark/benchmark.py +356 -0
- evalscope-1.0.1/evalscope/api/benchmark/meta.py +121 -0
- evalscope-1.0.1/evalscope/api/dataset/__init__.py +2 -0
- evalscope-1.0.1/evalscope/api/dataset/dataset.py +349 -0
- evalscope-1.0.1/evalscope/api/dataset/loader.py +262 -0
- evalscope-1.0.1/evalscope/api/dataset/utils.py +143 -0
- evalscope-1.0.1/evalscope/api/evaluator/__init__.py +3 -0
- evalscope-1.0.1/evalscope/api/evaluator/cache.py +378 -0
- evalscope-1.0.1/evalscope/api/evaluator/evaluator.py +56 -0
- evalscope-1.0.1/evalscope/api/evaluator/state.py +275 -0
- evalscope-1.0.1/evalscope/api/filter/__init__.py +1 -0
- evalscope-1.0.1/evalscope/api/filter/filter.py +72 -0
- evalscope-1.0.1/evalscope/api/messages/__init__.py +12 -0
- evalscope-1.0.1/evalscope/api/messages/chat_message.py +243 -0
- evalscope-1.0.1/evalscope/api/messages/content.py +102 -0
- evalscope-1.0.1/evalscope/api/messages/utils.py +35 -0
- evalscope-1.0.1/evalscope/api/metric/__init__.py +2 -0
- evalscope-1.0.1/evalscope/api/metric/metric.py +55 -0
- evalscope-1.0.1/evalscope/api/metric/scorer.py +113 -0
- evalscope-1.0.1/evalscope/api/mixin/__init__.py +1 -0
- evalscope-1.0.1/evalscope/api/mixin/llm_judge_mixin.py +168 -0
- evalscope-1.0.1/evalscope/api/model/__init__.py +12 -0
- evalscope-1.0.1/evalscope/api/model/generate_config.py +155 -0
- evalscope-1.0.1/evalscope/api/model/model.py +386 -0
- evalscope-1.0.1/evalscope/api/model/model_output.py +285 -0
- evalscope-1.0.1/evalscope/api/registry.py +182 -0
- evalscope-1.0.1/evalscope/api/tool/__init__.py +3 -0
- evalscope-1.0.1/evalscope/api/tool/tool_call.py +101 -0
- evalscope-1.0.1/evalscope/api/tool/tool_info.py +173 -0
- evalscope-1.0.1/evalscope/api/tool/utils.py +64 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/app.py +3 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/app_ui.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/multi_model.py +50 -25
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/single_model.py +26 -14
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/utils/data_utils.py +43 -27
- evalscope-1.0.1/evalscope/app/utils/env_utils.py +12 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/utils/text_utils.py +14 -14
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/utils/visualization.py +9 -4
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/arguments.py +7 -10
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/api_meta_template.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/backend_manager.py +6 -5
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/embedding.py +10 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/llm.py +13 -12
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/__init__.py +0 -2
- evalscope-1.0.1/evalscope/benchmarks/aime/aime24_adapter.py +50 -0
- evalscope-1.0.1/evalscope/benchmarks/aime/aime25_adapter.py +46 -0
- evalscope-1.0.1/evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +133 -0
- evalscope-1.0.1/evalscope/benchmarks/arc/arc_adapter.py +46 -0
- evalscope-1.0.1/evalscope/benchmarks/arena_hard/arena_hard_adapter.py +148 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/arena_hard/utils.py +37 -1
- evalscope-1.0.1/evalscope/benchmarks/bbh/bbh_adapter.py +175 -0
- evalscope-1.0.1/evalscope/benchmarks/bfcl/bfcl_adapter.py +254 -0
- evalscope-1.0.1/evalscope/benchmarks/bfcl/generation.py +222 -0
- evalscope-1.0.1/evalscope/benchmarks/ceval/ceval_adapter.py +169 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
- evalscope-1.0.1/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +122 -0
- evalscope-1.0.1/evalscope/benchmarks/competition_math/competition_math_adapter.py +73 -0
- evalscope-1.0.1/evalscope/benchmarks/data_collection/data_collection_adapter.py +214 -0
- evalscope-1.0.1/evalscope/benchmarks/docmath/docmath_adapter.py +143 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/docmath/utils.py +4 -5
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/drop/drop_adapter.py +88 -40
- evalscope-1.0.1/evalscope/benchmarks/frames/frames_adapter.py +175 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/general_arena/general_arena_adapter.py +140 -98
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/general_arena/utils.py +23 -27
- evalscope-1.0.1/evalscope/benchmarks/general_mcq/general_mcq_adapter.py +58 -0
- evalscope-1.0.1/evalscope/benchmarks/general_qa/general_qa_adapter.py +94 -0
- evalscope-1.0.1/evalscope/benchmarks/gpqa/gpqa_adapter.py +90 -0
- evalscope-0.17.1/evalscope/benchmarks/gpqa/chain_of_thought.txt → evalscope-1.0.1/evalscope/benchmarks/gpqa/prompt.py +12 -5
- evalscope-1.0.1/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +76 -0
- evalscope-1.0.1/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +62 -0
- evalscope-1.0.1/evalscope/benchmarks/hle/hle_adapter.py +152 -0
- evalscope-1.0.1/evalscope/benchmarks/humaneval/humaneval_adapter.py +124 -0
- evalscope-1.0.1/evalscope/benchmarks/ifeval/ifeval_adapter.py +83 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/instructions.py +109 -64
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope-1.0.1/evalscope/benchmarks/iquiz/iquiz_adapter.py +35 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
- evalscope-1.0.1/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +138 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
- evalscope-1.0.1/evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +56 -0
- evalscope-1.0.1/evalscope/benchmarks/math_500/math_500_adapter.py +51 -0
- evalscope-1.0.1/evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
- evalscope-1.0.1/evalscope/benchmarks/mmlu/mmlu_adapter.py +107 -0
- evalscope-1.0.1/evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +94 -0
- evalscope-1.0.1/evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +139 -0
- evalscope-1.0.1/evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope-1.0.1/evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
- evalscope-1.0.1/evalscope/benchmarks/musr/musr_adapter.py +43 -0
- evalscope-1.0.1/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +392 -0
- evalscope-1.0.1/evalscope/benchmarks/process_bench/process_bench_adapter.py +170 -0
- evalscope-1.0.1/evalscope/benchmarks/race/race_adapter.py +49 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
- evalscope-0.17.1/evalscope/benchmarks/super_gpqa/five_shot_prompt.txt → evalscope-1.0.1/evalscope/benchmarks/super_gpqa/prompt.py +14 -16
- evalscope-1.0.1/evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +165 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/super_gpqa/utils.py +2 -1
- evalscope-1.0.1/evalscope/benchmarks/tau_bench/generation.py +147 -0
- evalscope-1.0.1/evalscope/benchmarks/tau_bench/tau_bench_adapter.py +164 -0
- evalscope-1.0.1/evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
- evalscope-1.0.1/evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
- evalscope-1.0.1/evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
- evalscope-1.0.1/evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
- evalscope-1.0.1/evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
- evalscope-1.0.1/evalscope/benchmarks/tool_bench/tool_bench_adapter.py +102 -0
- evalscope-1.0.1/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +74 -0
- evalscope-1.0.1/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +91 -0
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models → evalscope-1.0.1/evalscope/benchmarks/winogrande}/__init__.py +0 -0
- evalscope-1.0.1/evalscope/benchmarks/winogrande/winogrande_adapter.py +34 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/cli.py +2 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/start_app.py +7 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/start_perf.py +7 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/start_server.py +6 -3
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/collections/__init__.py +2 -10
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/collections/sampler.py +10 -10
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/collections/schema.py +13 -11
- evalscope-1.0.1/evalscope/config.py +273 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/constants.py +37 -61
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/evaluator/__init__.py +1 -1
- evalscope-1.0.1/evalscope/evaluator/evaluator.py +339 -0
- evalscope-1.0.1/evalscope/filters/__init__.py +2 -0
- evalscope-1.0.1/evalscope/filters/extraction.py +126 -0
- evalscope-1.0.1/evalscope/filters/selection.py +57 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/__init__.py +13 -13
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/llm_judge.py +47 -33
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/math_parser.py +27 -22
- evalscope-1.0.1/evalscope/metrics/metric.py +307 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/metrics.py +22 -18
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
- evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
- evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
- evalscope-1.0.1/evalscope/models/__init__.py +26 -0
- evalscope-1.0.1/evalscope/models/image_edit_model.py +125 -0
- evalscope-1.0.1/evalscope/models/mockllm.py +65 -0
- evalscope-1.0.1/evalscope/models/model_apis.py +67 -0
- evalscope-1.0.1/evalscope/models/modelscope.py +455 -0
- evalscope-1.0.1/evalscope/models/openai_compatible.py +126 -0
- evalscope-1.0.1/evalscope/models/text2image_model.py +124 -0
- evalscope-1.0.1/evalscope/models/utils/openai.py +701 -0
- evalscope-1.0.1/evalscope/perf/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/benchmark.py +4 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/http_client.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/custom_api.py +5 -4
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/openai_api.py +11 -9
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/custom.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/flickr8k.py +1 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/line_by_line.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/longalpaca.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/openqa.py +4 -2
- evalscope-1.0.1/evalscope/perf/utils/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/benchmark_util.py +15 -10
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/db_util.py +9 -6
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/local_server.py +11 -3
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/rich_display.py +16 -10
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/report/__init__.py +2 -3
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/report/combinator.py +18 -12
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/report/generator.py +51 -35
- evalscope-0.17.1/evalscope/report/utils.py → evalscope-1.0.1/evalscope/report/report.py +8 -6
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/run.py +33 -47
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/summarizer.py +1 -1
- evalscope-1.0.1/evalscope/third_party/thinkbench/tools/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/__init__.py +21 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/chat_service.py +3 -2
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/deprecation_utils.py +12 -1
- evalscope-1.0.1/evalscope/utils/function_utils.py +29 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/import_utils.py +23 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/io_utils.py +142 -6
- evalscope-1.0.1/evalscope/utils/json_schema.py +208 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/logger.py +51 -12
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/model_utils.py +11 -7
- evalscope-1.0.1/evalscope/utils/multi_choices.py +288 -0
- evalscope-1.0.1/evalscope/utils/url_utils.py +65 -0
- evalscope-1.0.1/evalscope/version.py +4 -0
- {evalscope-0.17.1 → evalscope-1.0.1/evalscope.egg-info}/PKG-INFO +87 -49
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/SOURCES.txt +83 -51
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/requires.txt +21 -13
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements/aigc.txt +2 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements/dev.txt +1 -1
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements/framework.txt +7 -8
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements/rag.txt +1 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/setup.cfg +14 -5
- evalscope-1.0.1/tests/benchmark/test_eval.py +385 -0
- evalscope-1.0.1/tests/benchmark/test_image_edit.py +65 -0
- {evalscope-0.17.1/tests/aigc → evalscope-1.0.1/tests/benchmark}/test_t2i.py +22 -4
- evalscope-1.0.1/tests/benchmark/test_vlm.py +80 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/cli/test_all.py +85 -47
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/cli/test_collection.py +20 -8
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/cli/test_custom.py +22 -15
- evalscope-1.0.1/tests/cli/test_reasoning.py +81 -0
- evalscope-1.0.1/tests/common.py +73 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/perf/test_perf.py +4 -2
- evalscope-1.0.1/tests/rag/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/rag/test_clip_benchmark.py +0 -2
- evalscope-0.17.1/evalscope/__init__.py +0 -5
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/base.py +0 -56
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +0 -78
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +0 -58
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +0 -58
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +0 -57
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/tifa_adapter.py +0 -37
- evalscope-0.17.1/evalscope/benchmarks/aime/aime24_adapter.py +0 -52
- evalscope-0.17.1/evalscope/benchmarks/aime/aime25_adapter.py +0 -52
- evalscope-0.17.1/evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +0 -107
- evalscope-0.17.1/evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope-0.17.1/evalscope/benchmarks/arc/arc_adapter.py +0 -159
- evalscope-0.17.1/evalscope/benchmarks/arena_hard/arena_hard_adapter.py +0 -122
- evalscope-0.17.1/evalscope/benchmarks/bbh/bbh_adapter.py +0 -247
- evalscope-0.17.1/evalscope/benchmarks/benchmark.py +0 -81
- evalscope-0.17.1/evalscope/benchmarks/bfcl/bfcl_adapter.py +0 -237
- evalscope-0.17.1/evalscope/benchmarks/ceval/ceval_adapter.py +0 -238
- evalscope-0.17.1/evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope-0.17.1/evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope-0.17.1/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -213
- evalscope-0.17.1/evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope-0.17.1/evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope-0.17.1/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -125
- evalscope-0.17.1/evalscope/benchmarks/data_adapter.py +0 -528
- evalscope-0.17.1/evalscope/benchmarks/data_collection/data_collection_adapter.py +0 -72
- evalscope-0.17.1/evalscope/benchmarks/docmath/docmath_adapter.py +0 -85
- evalscope-0.17.1/evalscope/benchmarks/filters.py +0 -59
- evalscope-0.17.1/evalscope/benchmarks/frames/frames_adapter.py +0 -91
- evalscope-0.17.1/evalscope/benchmarks/general_mcq/general_mcq_adapter.py +0 -119
- evalscope-0.17.1/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -155
- evalscope-0.17.1/evalscope/benchmarks/gpqa/gpqa_adapter.py +0 -129
- evalscope-0.17.1/evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope-0.17.1/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -156
- evalscope-0.17.1/evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope-0.17.1/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -151
- evalscope-0.17.1/evalscope/benchmarks/hle/hle_adapter.py +0 -118
- evalscope-0.17.1/evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope-0.17.1/evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -93
- evalscope-0.17.1/evalscope/benchmarks/ifeval/ifeval_adapter.py +0 -54
- evalscope-0.17.1/evalscope/benchmarks/iquiz/iquiz_adapter.py +0 -70
- evalscope-0.17.1/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +0 -88
- evalscope-0.17.1/evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +0 -82
- evalscope-0.17.1/evalscope/benchmarks/math_500/math_500_adapter.py +0 -58
- evalscope-0.17.1/evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope-0.17.1/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -280
- evalscope-0.17.1/evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope-0.17.1/evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +0 -113
- evalscope-0.17.1/evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +0 -185
- evalscope-0.17.1/evalscope/benchmarks/musr/musr_adapter.py +0 -74
- evalscope-0.17.1/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +0 -348
- evalscope-0.17.1/evalscope/benchmarks/process_bench/critique_template.txt +0 -13
- evalscope-0.17.1/evalscope/benchmarks/process_bench/process_bench_adapter.py +0 -102
- evalscope-0.17.1/evalscope/benchmarks/race/race.py +0 -104
- evalscope-0.17.1/evalscope/benchmarks/race/race_adapter.py +0 -135
- evalscope-0.17.1/evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope-0.17.1/evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +0 -209
- evalscope-0.17.1/evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
- evalscope-0.17.1/evalscope/benchmarks/tau_bench/tau_bench_adapter.py +0 -110
- evalscope-0.17.1/evalscope/benchmarks/tool_bench/tool_bench_adapter.py +0 -81
- evalscope-0.17.1/evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope-0.17.1/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -142
- evalscope-0.17.1/evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope-0.17.1/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +0 -287
- evalscope-0.17.1/evalscope/benchmarks/utils.py +0 -60
- evalscope-0.17.1/evalscope/benchmarks/winogrande/winogrande_adapter.py +0 -60
- evalscope-0.17.1/evalscope/collections/evaluator.py +0 -375
- evalscope-0.17.1/evalscope/config.py +0 -173
- evalscope-0.17.1/evalscope/evaluator/evaluator.py +0 -483
- evalscope-0.17.1/evalscope/metrics/completion_parsers.py +0 -227
- evalscope-0.17.1/evalscope/metrics/named_metrics.py +0 -55
- evalscope-0.17.1/evalscope/metrics/t2v_metrics/__init__.py +0 -52
- evalscope-0.17.1/evalscope/models/__init__.py +0 -49
- evalscope-0.17.1/evalscope/models/adapters/__init__.py +0 -14
- evalscope-0.17.1/evalscope/models/adapters/base_adapter.py +0 -84
- evalscope-0.17.1/evalscope/models/adapters/bfcl_adapter.py +0 -246
- evalscope-0.17.1/evalscope/models/adapters/chat_adapter.py +0 -207
- evalscope-0.17.1/evalscope/models/adapters/choice_adapter.py +0 -222
- evalscope-0.17.1/evalscope/models/adapters/custom_adapter.py +0 -71
- evalscope-0.17.1/evalscope/models/adapters/server_adapter.py +0 -236
- evalscope-0.17.1/evalscope/models/adapters/t2i_adapter.py +0 -79
- evalscope-0.17.1/evalscope/models/adapters/tau_bench_adapter.py +0 -189
- evalscope-0.17.1/evalscope/models/custom/__init__.py +0 -4
- evalscope-0.17.1/evalscope/models/custom/custom_model.py +0 -50
- evalscope-0.17.1/evalscope/models/custom/dummy_model.py +0 -99
- evalscope-0.17.1/evalscope/models/local_model.py +0 -128
- evalscope-0.17.1/evalscope/models/register.py +0 -41
- evalscope-0.17.1/evalscope/version.py +0 -4
- evalscope-0.17.1/tests/cli/test_run.py +0 -489
- {evalscope-0.17.1 → evalscope-1.0.1}/LICENSE +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/MANIFEST.in +0 -0
- {evalscope-0.17.1/evalscope/backend → evalscope-1.0.1/evalscope/api}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/arguments.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/constants.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/sidebar.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/visualization.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/utils/localization.py +0 -0
- {evalscope-0.17.1/evalscope/backend/rag_eval/clip_benchmark/tasks → evalscope-1.0.1/evalscope/backend}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/tasks/eval_datasets.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/backend_manager.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/arguments.py +0 -0
- {evalscope-0.17.1/evalscope/backend/rag_eval/utils → evalscope-1.0.1/evalscope/backend/rag_eval/clip_benchmark/tasks}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/utils/webdataset_convert.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/utils/webdatasets.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/arguments.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/task_template.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/STS.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/arguments.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/aigc → evalscope-1.0.1/evalscope/backend/rag_eval/utils}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/clip.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/tools.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/vlm_eval_kit/backend_manager.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/aigc/t2i → evalscope-1.0.1/evalscope/benchmarks/aime}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/aime → evalscope-1.0.1/evalscope/benchmarks/alpaca_eval}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/arc/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/alpaca_eval → evalscope-1.0.1/evalscope/benchmarks/arena_hard}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/arena_hard → evalscope-1.0.1/evalscope/benchmarks/bfcl}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ceval/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/bfcl → evalscope-1.0.1/evalscope/benchmarks/chinese_simple_qa}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/competition_math/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/chinese_simple_qa → evalscope-1.0.1/evalscope/benchmarks/data_collection}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/data_collection → evalscope-1.0.1/evalscope/benchmarks/docmath}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/docmath → evalscope-1.0.1/evalscope/benchmarks/drop}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/drop/utils.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/drop → evalscope-1.0.1/evalscope/benchmarks/frames}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/frames/utils.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/frames → evalscope-1.0.1/evalscope/benchmarks/general_arena}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/general_arena → evalscope-1.0.1/evalscope/benchmarks/general_mcq}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/general_qa/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/general_mcq → evalscope-1.0.1/evalscope/benchmarks/gpqa}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/gpqa → evalscope-1.0.1/evalscope/benchmarks/hle}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/humaneval/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/hle → evalscope-1.0.1/evalscope/benchmarks/ifeval}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/ifeval → evalscope-1.0.1/evalscope/benchmarks/image_edit}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/iquiz → evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/live_code_bench → evalscope-1.0.1/evalscope/benchmarks/iquiz}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/maritime_bench → evalscope-1.0.1/evalscope/benchmarks/live_code_bench}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/extract_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/pass_k_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/prompts.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/math_500 → evalscope-1.0.1/evalscope/benchmarks/maritime_bench}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/mmlu_pro → evalscope-1.0.1/evalscope/benchmarks/math_500}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/mmlu_redux → evalscope-1.0.1/evalscope/benchmarks/math_vista}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/mmlu/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/musr → evalscope-1.0.1/evalscope/benchmarks/mmlu_pro}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/needle_haystack → evalscope-1.0.1/evalscope/benchmarks/mmlu_redux}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/process_bench → evalscope-1.0.1/evalscope/benchmarks/mmmu}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/simple_qa → evalscope-1.0.1/evalscope/benchmarks/mmmu_pro}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/super_gpqa → evalscope-1.0.1/evalscope/benchmarks/musr}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/tau_bench → evalscope-1.0.1/evalscope/benchmarks/needle_haystack}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/needle_haystack/utils.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/tool_bench → evalscope-1.0.1/evalscope/benchmarks/process_bench}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/race/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/winogrande → evalscope-1.0.1/evalscope/benchmarks/simple_qa}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models → evalscope-1.0.1/evalscope/benchmarks/super_gpqa}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model → evalscope-1.0.1/evalscope/benchmarks/tau_bench}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward → evalscope-1.0.1/evalscope/benchmarks/text2image}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5 → evalscope-1.0.1/evalscope/benchmarks/tool_bench}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/tool_bench/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/trivia_qa/samples.jsonl +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/start_eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/rouge_metric.py +0 -0
- {evalscope-0.17.1/evalscope/perf → evalscope-1.0.1/evalscope/metrics/t2v_metrics}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/clipscore.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/constants.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/itmscore.py +0 -0
- {evalscope-0.17.1/evalscope/perf/utils → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/third_party/thinkbench/tools → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +0 -0
- {evalscope-0.17.1/tests/rag → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/model.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/score.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/vqascore.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/arguments.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/main.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/dashscope_api.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/default_api.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/random_dataset.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/random_vl_dataset.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/speed_benchmark.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/registry.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/analysis_result.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/handler.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/log_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/README.md +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/default_task.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/default_task.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/infer.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/tools/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/tools/openai_api.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/infer.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/resources/critique_template.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/resources/reformat_template.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/tools/llm.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/tools/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/README.md +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/config_default.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/config_default.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/infer.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/llm/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/requirements.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/argument_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/dependency_links.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/entry_points.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/not-zip-safe +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/top_level.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements/app.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements/docs.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements/opencompass.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements/perf.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements/vlmeval.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/requirements.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/setup.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/__init__.py +0 -0
- {evalscope-0.17.1/tests/aigc → evalscope-1.0.1/tests/benchmark}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/cli/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/perf/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/rag/test_mteb.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/rag/test_ragas.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/swift/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/swift/test_run_swift_eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/swift/test_run_swift_vlm_eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/swift/test_run_swift_vlm_jugde_eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/test_run_all.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/vlm/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.1}/tests/vlm/test_vlmeval.py +0 -0
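The additions under `evalscope/api/` above (`registry.py`, `benchmark/adapters/`, `dataset/`, `metric/`, `model/`) replace the per-benchmark adapter modules removed from 0.17.1 and point to the registry-based design described in the README diff below. As a rough illustration only, the sketch below shows the general shape of such a registry pattern; every name in it (`BENCHMARK_REGISTRY`, `register_benchmark`, `DataAdapter`) is hypothetical and not taken from evalscope's actual 1.0 API.

```python
# Hypothetical sketch of a registry-based benchmark design.
# BENCHMARK_REGISTRY, register_benchmark and DataAdapter are illustrative
# names, not evalscope's real 1.0 API.
from typing import Callable, Dict, Type

BENCHMARK_REGISTRY: Dict[str, Type["DataAdapter"]] = {}


def register_benchmark(name: str) -> Callable[[Type["DataAdapter"]], Type["DataAdapter"]]:
    """Class decorator that records an adapter class under a benchmark name."""
    def decorator(cls: Type["DataAdapter"]) -> Type["DataAdapter"]:
        BENCHMARK_REGISTRY[name] = cls
        return cls
    return decorator


class DataAdapter:
    """Base class: turns a raw dataset record into a prompt for the model."""

    def build_prompt(self, record: dict) -> str:
        raise NotImplementedError


@register_benchmark("gsm8k")
class GSM8KAdapter(DataAdapter):
    def build_prompt(self, record: dict) -> str:
        return f"Question: {record['question']}\nAnswer:"


# At evaluation time the evaluator looks adapters up by name instead of
# importing benchmark modules directly.
adapter = BENCHMARK_REGISTRY["gsm8k"]()
print(adapter.build_prompt({"question": "What is 2 + 2?"}))
```

This conveys only the shape of the change; the real adapters listed above (e.g. `default_data_adapter.py`, `multi_choice_adapter.py`) carry far more responsibility, including loading, filtering, and scoring.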
{evalscope-0.17.1/evalscope.egg-info → evalscope-1.0.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: evalscope
-Version: 0.17.1
+Version: 1.0.1
 Summary: EvalScope: Lightweight LLMs Evaluation Framework
 Home-page: https://github.com/modelscope/evalscope
 Author: ModelScope team

@@ -57,9 +57,9 @@ License-File: LICENSE
 - [📝 Introduction](#-introduction)
 - [☎ User Groups](#-user-groups)
 - [🎉 News](#-news)
-- [🛠️ …
-- [Method 1 …
-- [Method 2 …
+- [🛠️ Environment Setup](#️-environment-setup)
+- [Method 1. Install via pip](#method-1-install-via-pip)
+- [Method 2. Install from source](#method-2-install-from-source)
 - [🚀 Quick Start](#-quick-start)
 - [Method 1. Using Command Line](#method-1-using-command-line)
 - [Method 2. Using Python Code](#method-2-using-python-code)

@@ -140,6 +140,15 @@ Please scan the QR code below to join our community groups:


 ## 🎉 News
+
+> [!IMPORTANT]
+> **Version 1.0 Refactoring**
+>
+> Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
+
+- 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
+- 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
+- 🔥 **[2025.08.22]** Version 1.0 refactoring with breaking changes; please [refer to the documentation](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
 - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
 - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
 - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).

@@ -147,16 +156,16 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
 - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
 - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
|
|
150
|
-
- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
|
|
159
|
+
- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
|
|
151
160
|
- 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
|
|
152
161
|
- 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
|
|
162
|
+
<details><summary>More</summary>
|
|
163
|
+
|
|
153
164
|
- 🔥 **[2025.04.29]** Added Qwen3 Evaluation Best Practices, [welcome to read 📖](https://evalscope.readthedocs.io/en/latest/best_practice/qwen3.html)
|
|
154
165
|
- 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
|
|
155
166
|
- 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
|
|
156
167
|
- 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
|
|
168
|
+
- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
|
|
160
169
|
- 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
|
|
161
170
|
- 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
|
|
162
171
|
- 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
|
|
@@ -188,58 +197,87 @@ Please scan the QR code below to join our community groups:
|
|
|
188
197
|
|
|
189
198
|
</details>
|
|
190
199
|
|
|
191
|
-
## 🛠️
|
|
192
|
-
|
|
193
|
-
|
|
200
|
+
## 🛠️ Environment Setup
|
|
201
|
+
|
|
202
|
+
### Method 1. Install via pip
|
|
203
|
+
|
|
204
|
+
We recommend using conda to manage your environment and pip to install dependencies. This allows you to use the latest evalscope PyPI package.
|
|
194
205
|
|
|
195
206
|
1. Create a conda environment (optional)
|
|
207
|
+
```shell
|
|
208
|
+
# Python 3.10 is recommended
|
|
209
|
+
conda create -n evalscope python=3.10
|
|
210
|
+
|
|
211
|
+
# Activate the conda environment
|
|
212
|
+
conda activate evalscope
|
|
213
|
+
```
|
|
214
|
+
2. Install dependencies via pip
|
|
215
|
+
```shell
|
|
216
|
+
pip install evalscope
|
|
217
|
+
```
|
|
218
|
+
3. Install additional dependencies (optional)
|
|
219
|
+
- To use model service inference benchmarking features, install the perf dependency:
|
|
196
220
|
```shell
|
|
197
|
-
|
|
198
|
-
conda create -n evalscope python=3.10
|
|
199
|
-
# Activate the conda environment
|
|
200
|
-
conda activate evalscope
|
|
221
|
+
pip install 'evalscope[perf]'
|
|
201
222
|
```
|
|
202
|
-
|
|
203
|
-
2. Install dependencies using pip
|
|
223
|
+
- To use visualization features, install the app dependency:
|
|
204
224
|
```shell
|
|
205
|
-
pip install evalscope
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
pip install 'evalscope[
|
|
210
|
-
pip install 'evalscope[
|
|
211
|
-
pip install 'evalscope[
|
|
212
|
-
|
|
225
|
+
pip install 'evalscope[app]'
|
|
226
|
+
```
|
|
227
|
+
- If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
|
|
228
|
+
```shell
|
|
229
|
+
pip install 'evalscope[opencompass]'
|
|
230
|
+
pip install 'evalscope[vlmeval]'
|
|
231
|
+
pip install 'evalscope[rag]'
|
|
232
|
+
```
|
|
233
|
+
- To install all dependencies:
|
|
234
|
+
```shell
|
|
235
|
+
pip install 'evalscope[all]'
|
|
213
236
|
```
|
|
214
237
|
|
|
215
|
-
> [!
|
|
216
|
-
>
|
|
238
|
+
> [!NOTE]
|
|
239
|
+
> The project has been renamed to `evalscope`. For version `v0.4.3` or earlier, you can install it with:
|
|
217
240
|
> ```shell
|
|
218
|
-
>
|
|
241
|
+
> pip install llmuses<=0.4.3
|
|
219
242
|
> ```
|
|
220
|
-
>
|
|
221
|
-
> ```
|
|
243
|
+
> Then, import related dependencies using `llmuses`:
|
|
244
|
+
> ```python
|
|
222
245
|
> from llmuses import ...
|
|
223
246
|
> ```
|
|
224
247
|
|
|
225
|
-
### Method 2
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
git clone https://github.com/modelscope/evalscope.git
|
|
229
|
-
```
|
|
248
|
+
### Method 2. Install from source
|
|
249
|
+
|
|
250
|
+
Installing from source allows you to use the latest code and makes it easier for further development and debugging.
|
|
230
251
|
|
|
252
|
+
1. Clone the source code
|
|
253
|
+
```shell
|
|
254
|
+
git clone https://github.com/modelscope/evalscope.git
|
|
255
|
+
```
|
|
231
256
|
2. Install dependencies
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
257
|
+
```shell
|
|
258
|
+
cd evalscope/
|
|
259
|
+
|
|
260
|
+
pip install -e .
|
|
261
|
+
```
|
|
262
|
+
3. Install additional dependencies
|
|
263
|
+
- To use model service inference benchmarking features, install the perf dependency:
|
|
264
|
+
```shell
|
|
265
|
+
pip install '.[perf]'
|
|
266
|
+
```
|
|
267
|
+
- To use visualization features, install the app dependency:
|
|
268
|
+
```shell
|
|
269
|
+
pip install '.[app]'
|
|
270
|
+
```
|
|
271
|
+
- If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
|
|
272
|
+
```shell
|
|
273
|
+
pip install '.[opencompass]'
|
|
274
|
+
pip install '.[vlmeval]'
|
|
275
|
+
pip install '.[rag]'
|
|
276
|
+
```
|
|
277
|
+
- To install all dependencies:
|
|
278
|
+
```shell
|
|
279
|
+
pip install '.[all]'
|
|
280
|
+
```
|
|
243
281
|
|
|
244
282
|
|
|
245
283
|
## 🚀 Quick Start
|
|
@@ -326,7 +364,7 @@ run_task(task_cfg="config.json")
|
|
|
326
364
|
|
|
327
365
|
### Basic Parameter
|
|
328
366
|
- `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
|
|
329
|
-
- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
|
|
367
|
+
- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
|
|
330
368
|
- `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
|
|
331
369
|
|
|
332
370
|
### Output Results
|
|
@@ -415,7 +453,7 @@ For more customized evaluations, such as customizing model parameters or dataset
|
|
|
415
453
|
evalscope eval \
|
|
416
454
|
--model Qwen/Qwen3-0.6B \
|
|
417
455
|
--model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
|
|
418
|
-
--generation-config '{"do_sample":true,"temperature":0.6,"
|
|
456
|
+
--generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
|
|
419
457
|
--dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
|
|
420
458
|
--datasets gsm8k \
|
|
421
459
|
--limit 10
|
|
@@ -429,7 +467,7 @@ evalscope eval \
|
|
|
429
467
|
- `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
|
|
430
468
|
- `do_sample`: Whether to use sampling
|
|
431
469
|
- `temperature`: Generation temperature
|
|
432
|
-
- `
|
|
470
|
+
- `max_tokens`: Maximum length of generated tokens
|
|
433
471
|
- `chat_template_kwargs`: Model inference template parameters
|
|
434
472
|
- `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
|
|
435
473
|
- `few_shot_num`: Number of few-shot examples
|
|
@@ -28,9 +28,9 @@
|
|
|
28
28
|
- [📝 Introduction](#-introduction)
|
|
29
29
|
- [☎ User Groups](#-user-groups)
|
|
30
30
|
- [🎉 News](#-news)
|
|
31
|
-
- [🛠️
|
|
32
|
-
- [Method 1
|
|
33
|
-
- [Method 2
|
|
31
|
+
- [🛠️ Environment Setup](#️-environment-setup)
|
|
32
|
+
- [Method 1. Install via pip](#method-1-install-via-pip)
|
|
33
|
+
- [Method 2. Install from source](#method-2-install-from-source)
|
|
34
34
|
- [🚀 Quick Start](#-quick-start)
|
|
35
35
|
- [Method 1. Using Command Line](#method-1-using-command-line)
|
|
36
36
|
- [Method 2. Using Python Code](#method-2-using-python-code)
|
|
@@ -111,6 +111,15 @@ Please scan the QR code below to join our community groups:
|
|
|
111
111
|
|
|
112
112
|
|
|
113
113
|
## 🎉 News
|
|
114
|
+
|
|
115
|
+
> [!IMPORTANT]
|
|
116
|
+
> **Version 1.0 Refactoring**
|
|
117
|
+
>
|
|
118
|
+
> Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
|
|
119
|
+
|
|
120
|
+
- 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
|
|
121
|
+
- 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
|
|
122
|
+
- 🔥 **[2025.08.22]** Version 1.0 Refactoring. Break changes, please [refer to](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
|
|
114
123
|
- 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
|
|
115
124
|
- 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
|
|
116
125
|
- 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
|
|
@@ -118,16 +127,16 @@ Please scan the QR code below to join our community groups:
|
|
|
118
127
|
- 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
|
|
119
128
|
- 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
|
|
120
129
|
- 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
|
|
121
|
-
- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
|
|
130
|
+
- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
|
|
122
131
|
- 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
|
|
123
132
|
- 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
|
|
133
|
+
<details><summary>More</summary>
|
|
134
|
+
|
|
124
135
|
- 🔥 **[2025.04.29]** Added Qwen3 Evaluation Best Practices, [welcome to read 📖](https://evalscope.readthedocs.io/en/latest/best_practice/qwen3.html)
|
|
125
136
|
- 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
|
|
126
137
|
- 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
|
|
127
138
|
- 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
|
|
139
|
+
- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
|
|
131
140
|
- 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
|
|
132
141
|
- 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
|
|
133
142
|
- 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
|
|
@@ -159,58 +168,87 @@ Please scan the QR code below to join our community groups:
|
|
|
159
168
|
|
|
160
169
|
</details>
|
|
161
170
|
|
|
162
|
-
## 🛠️
|
|
163
|
-
|
|
164
|
-
|
|
171
|
+
## 🛠️ Environment Setup
|
|
172
|
+
|
|
173
|
+
### Method 1. Install via pip
|
|
174
|
+
|
|
175
|
+
We recommend using conda to manage your environment and pip to install dependencies. This allows you to use the latest evalscope PyPI package.
|
|
165
176
|
|
|
166
177
|
1. Create a conda environment (optional)
|
|
178
|
+
```shell
|
|
179
|
+
# Python 3.10 is recommended
|
|
180
|
+
conda create -n evalscope python=3.10
|
|
181
|
+
|
|
182
|
+
# Activate the conda environment
|
|
183
|
+
conda activate evalscope
|
|
184
|
+
```
|
|
185
|
+
2. Install dependencies via pip
|
|
186
|
+
```shell
|
|
187
|
+
pip install evalscope
|
|
188
|
+
```
|
|
189
|
+
3. Install additional dependencies (optional)
|
|
190
|
+
- To use model service inference benchmarking features, install the perf dependency:
|
|
167
191
|
```shell
|
|
168
|
-
|
|
169
|
-
conda create -n evalscope python=3.10
|
|
170
|
-
# Activate the conda environment
|
|
171
|
-
conda activate evalscope
|
|
192
|
+
pip install 'evalscope[perf]'
|
|
172
193
|
```
|
|
173
|
-
|
|
174
|
-
2. Install dependencies using pip
|
|
194
|
+
- To use visualization features, install the app dependency:
|
|
175
195
|
```shell
|
|
176
|
-
pip install evalscope
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
pip install 'evalscope[
|
|
181
|
-
pip install 'evalscope[
|
|
182
|
-
pip install 'evalscope[
|
|
183
|
-
|
|
196
|
+
pip install 'evalscope[app]'
|
|
197
|
+
```
|
|
198
|
+
- If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
|
|
199
|
+
```shell
|
|
200
|
+
pip install 'evalscope[opencompass]'
|
|
201
|
+
pip install 'evalscope[vlmeval]'
|
|
202
|
+
pip install 'evalscope[rag]'
|
|
203
|
+
```
|
|
204
|
+
- To install all dependencies:
|
|
205
|
+
```shell
|
|
206
|
+
pip install 'evalscope[all]'
|
|
184
207
|
```
|
|
185
208
|
|
|
186
|
-
> [!
|
|
187
|
-
>
|
|
209
|
+
> [!NOTE]
|
|
210
|
+
> The project has been renamed to `evalscope`. For version `v0.4.3` or earlier, you can install it with:
|
|
188
211
|
> ```shell
|
|
189
|
-
>
|
|
212
|
+
> pip install llmuses<=0.4.3
|
|
190
213
|
> ```
|
|
191
|
-
>
|
|
192
|
-
> ```
|
|
214
|
+
> Then, import related dependencies using `llmuses`:
|
|
215
|
+
> ```python
|
|
193
216
|
> from llmuses import ...
|
|
194
217
|
> ```
|
|
195
218
|
|
|
196
|
-
### Method 2
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
git clone https://github.com/modelscope/evalscope.git
|
|
200
|
-
```
|
|
219
|
+
### Method 2. Install from source
|
|
220
|
+
|
|
221
|
+
Installing from source allows you to use the latest code and makes it easier for further development and debugging.
|
|
201
222
|
|
|
223
|
+
1. Clone the source code
|
|
224
|
+
```shell
|
|
225
|
+
git clone https://github.com/modelscope/evalscope.git
|
|
226
|
+
```
|
|
202
227
|
2. Install dependencies
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
228
|
+
```shell
|
|
229
|
+
cd evalscope/
|
|
230
|
+
|
|
231
|
+
pip install -e .
|
|
232
|
+
```
|
|
233
|
+
3. Install additional dependencies
|
|
234
|
+
- To use model service inference benchmarking features, install the perf dependency:
|
|
235
|
+
```shell
|
|
236
|
+
pip install '.[perf]'
|
|
237
|
+
```
|
|
238
|
+
- To use visualization features, install the app dependency:
|
|
239
|
+
```shell
|
|
240
|
+
pip install '.[app]'
|
|
241
|
+
```
|
|
242
|
+
- If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
|
|
243
|
+
```shell
|
|
244
|
+
pip install '.[opencompass]'
|
|
245
|
+
pip install '.[vlmeval]'
|
|
246
|
+
pip install '.[rag]'
|
|
247
|
+
```
|
|
248
|
+
- To install all dependencies:
|
|
249
|
+
```shell
|
|
250
|
+
pip install '.[all]'
|
|
251
|
+
```
|
|
214
252
|
|
|
215
253
|
|
|
216
254
|
## 🚀 Quick Start
|
|
@@ -297,7 +335,7 @@ run_task(task_cfg="config.json")
|
|
|
297
335
|
|
|
298
336
|
### Basic Parameter
|
|
299
337
|
- `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
|
|
300
|
-
- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
|
|
338
|
+
- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
|
|
301
339
|
- `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
|
|
302
340
|
|
|
303
341
|
### Output Results
|
|
@@ -386,7 +424,7 @@ For more customized evaluations, such as customizing model parameters or dataset
|
|
|
386
424
|
evalscope eval \
|
|
387
425
|
--model Qwen/Qwen3-0.6B \
|
|
388
426
|
--model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
|
|
389
|
-
--generation-config '{"do_sample":true,"temperature":0.6,"
|
|
427
|
+
--generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
|
|
390
428
|
--dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
|
|
391
429
|
--datasets gsm8k \
|
|
392
430
|
--limit 10
|
|
@@ -400,7 +438,7 @@ evalscope eval \
|
|
|
400
438
|
- `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
|
|
401
439
|
- `do_sample`: Whether to use sampling
|
|
402
440
|
- `temperature`: Generation temperature
|
|
403
|
-
- `
|
|
441
|
+
- `max_tokens`: Maximum length of generated tokens
|
|
404
442
|
- `chat_template_kwargs`: Model inference template parameters
|
|
405
443
|
- `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
|
|
406
444
|
- `few_shot_num`: Number of few-shot examples
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
# Copyright (c) Alibaba, Inc. and its affiliates.
|
|
2
|
+
from evalscope.benchmarks import * # registered benchmarks
|
|
3
|
+
from evalscope.config import TaskConfig
|
|
4
|
+
from evalscope.filters import extraction, selection # registered filters
|
|
5
|
+
from evalscope.metrics import metric # registered metrics
|
|
6
|
+
from evalscope.models import model_apis # need for register model apis
|
|
7
|
+
from evalscope.run import run_task
|
|
8
|
+
from .version import __release_datetime__, __version__
|
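
Read together with the Quick Start section above, the new top-level exports in `evalscope/__init__.py` (`TaskConfig` and `run_task`) suggest the following minimal sketch of a v1.0 evaluation run in Python. The keyword arguments shown are assumptions mirrored from the documented CLI flags (`--model`, `--datasets`, `--limit`, `--generation-config`), not a confirmed API surface; check the v1.0 migration guide linked in the news section before relying on them.

```python
# Minimal sketch of a v1.0 evaluation run using the new top-level exports
# (TaskConfig, run_task) from evalscope/__init__.py shown above.
# Assumption: TaskConfig accepts keyword arguments that mirror the CLI flags;
# the field names here are illustrative rather than a confirmed API.
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='Qwen/Qwen3-0.6B',      # ModelScope model_id or a local model path
    datasets=['gsm8k'],           # benchmark names registered via evalscope.benchmarks
    limit=10,                     # small sample count for a quick validation run
    generation_config={           # mirrors --generation-config in the CLI example
        'do_sample': True,
        'temperature': 0.6,
        'max_tokens': 512,
        'chat_template_kwargs': {'enable_thinking': False},
    },
)

run_task(task_cfg=task_cfg)  # run_task also accepts a config file path, e.g. "config.json"
```

As with the CLI example, keeping `limit` small gives a quick validation pass; omit it to evaluate the full dataset.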