evalscope 0.17.1__tar.gz → 1.0.0__tar.gz
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of evalscope might be problematic.
- {evalscope-0.17.1/evalscope.egg-info → evalscope-1.0.0}/PKG-INFO +80 -44
- {evalscope-0.17.1 → evalscope-1.0.0}/README.md +79 -43
- evalscope-1.0.0/evalscope/__init__.py +8 -0
- evalscope-1.0.0/evalscope/api/benchmark/__init__.py +3 -0
- evalscope-1.0.0/evalscope/api/benchmark/adapters/__init__.py +3 -0
- evalscope-1.0.0/evalscope/api/benchmark/adapters/default_data_adapter.py +683 -0
- evalscope-1.0.0/evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
- evalscope-1.0.0/evalscope/api/benchmark/adapters/text2image_adapter.py +155 -0
- evalscope-1.0.0/evalscope/api/benchmark/benchmark.py +321 -0
- evalscope-1.0.0/evalscope/api/benchmark/meta.py +115 -0
- evalscope-1.0.0/evalscope/api/dataset/__init__.py +2 -0
- evalscope-1.0.0/evalscope/api/dataset/dataset.py +349 -0
- evalscope-1.0.0/evalscope/api/dataset/loader.py +261 -0
- evalscope-1.0.0/evalscope/api/dataset/utils.py +143 -0
- evalscope-1.0.0/evalscope/api/evaluator/__init__.py +3 -0
- evalscope-1.0.0/evalscope/api/evaluator/cache.py +355 -0
- evalscope-1.0.0/evalscope/api/evaluator/evaluator.py +56 -0
- evalscope-1.0.0/evalscope/api/evaluator/state.py +264 -0
- evalscope-1.0.0/evalscope/api/filter/__init__.py +1 -0
- evalscope-1.0.0/evalscope/api/filter/filter.py +72 -0
- evalscope-1.0.0/evalscope/api/messages/__init__.py +11 -0
- evalscope-1.0.0/evalscope/api/messages/chat_message.py +198 -0
- evalscope-1.0.0/evalscope/api/messages/content.py +102 -0
- evalscope-1.0.0/evalscope/api/messages/utils.py +35 -0
- evalscope-1.0.0/evalscope/api/metric/__init__.py +2 -0
- evalscope-1.0.0/evalscope/api/metric/metric.py +55 -0
- evalscope-1.0.0/evalscope/api/metric/scorer.py +105 -0
- evalscope-1.0.0/evalscope/api/mixin/__init__.py +2 -0
- evalscope-1.0.0/evalscope/api/mixin/dataset_mixin.py +105 -0
- evalscope-1.0.0/evalscope/api/mixin/llm_judge_mixin.py +168 -0
- evalscope-1.0.0/evalscope/api/model/__init__.py +12 -0
- evalscope-1.0.0/evalscope/api/model/generate_config.py +157 -0
- evalscope-1.0.0/evalscope/api/model/model.py +383 -0
- evalscope-1.0.0/evalscope/api/model/model_output.py +285 -0
- evalscope-1.0.0/evalscope/api/registry.py +182 -0
- evalscope-1.0.0/evalscope/api/tool/__init__.py +3 -0
- evalscope-1.0.0/evalscope/api/tool/tool_call.py +101 -0
- evalscope-1.0.0/evalscope/api/tool/tool_info.py +173 -0
- evalscope-1.0.0/evalscope/api/tool/utils.py +64 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/ui/app_ui.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/ui/multi_model.py +50 -25
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/ui/single_model.py +23 -11
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/utils/data_utils.py +42 -26
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/utils/text_utils.py +0 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/utils/visualization.py +9 -4
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/arguments.py +6 -7
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/opencompass/api_meta_template.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/opencompass/backend_manager.py +6 -3
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/utils/embedding.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/utils/llm.py +13 -12
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/__init__.py +0 -2
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py → evalscope-1.0.0/evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +1 -15
- evalscope-1.0.0/evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +76 -0
- evalscope-1.0.0/evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +53 -0
- evalscope-1.0.0/evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +42 -0
- evalscope-1.0.0/evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +47 -0
- evalscope-1.0.0/evalscope/benchmarks/aigc/t2i/tifa_adapter.py +26 -0
- evalscope-1.0.0/evalscope/benchmarks/aime/aime24_adapter.py +50 -0
- evalscope-1.0.0/evalscope/benchmarks/aime/aime25_adapter.py +46 -0
- evalscope-1.0.0/evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +133 -0
- evalscope-1.0.0/evalscope/benchmarks/arc/arc_adapter.py +46 -0
- evalscope-1.0.0/evalscope/benchmarks/arena_hard/arena_hard_adapter.py +148 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/arena_hard/utils.py +37 -1
- evalscope-1.0.0/evalscope/benchmarks/bbh/bbh_adapter.py +175 -0
- evalscope-1.0.0/evalscope/benchmarks/bfcl/bfcl_adapter.py +258 -0
- evalscope-1.0.0/evalscope/benchmarks/bfcl/generation.py +222 -0
- evalscope-1.0.0/evalscope/benchmarks/ceval/ceval_adapter.py +170 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
- evalscope-1.0.0/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +122 -0
- evalscope-1.0.0/evalscope/benchmarks/competition_math/competition_math_adapter.py +73 -0
- evalscope-1.0.0/evalscope/benchmarks/data_collection/data_collection_adapter.py +210 -0
- evalscope-1.0.0/evalscope/benchmarks/docmath/docmath_adapter.py +143 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/docmath/utils.py +4 -5
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/drop/drop_adapter.py +88 -40
- evalscope-1.0.0/evalscope/benchmarks/frames/frames_adapter.py +174 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/general_arena/general_arena_adapter.py +136 -98
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/general_arena/utils.py +23 -27
- evalscope-1.0.0/evalscope/benchmarks/general_mcq/general_mcq_adapter.py +58 -0
- evalscope-1.0.0/evalscope/benchmarks/general_qa/general_qa_adapter.py +94 -0
- evalscope-1.0.0/evalscope/benchmarks/gpqa/gpqa_adapter.py +90 -0
- evalscope-0.17.1/evalscope/benchmarks/gpqa/chain_of_thought.txt → evalscope-1.0.0/evalscope/benchmarks/gpqa/prompt.py +12 -5
- evalscope-1.0.0/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +76 -0
- evalscope-1.0.0/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +62 -0
- evalscope-1.0.0/evalscope/benchmarks/hle/hle_adapter.py +152 -0
- evalscope-1.0.0/evalscope/benchmarks/humaneval/humaneval_adapter.py +124 -0
- evalscope-1.0.0/evalscope/benchmarks/ifeval/ifeval_adapter.py +83 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/ifeval/instructions.py +109 -64
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope-1.0.0/evalscope/benchmarks/iquiz/iquiz_adapter.py +35 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
- evalscope-1.0.0/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +138 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
- evalscope-1.0.0/evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +56 -0
- evalscope-1.0.0/evalscope/benchmarks/math_500/math_500_adapter.py +51 -0
- evalscope-1.0.0/evalscope/benchmarks/mmlu/mmlu_adapter.py +107 -0
- evalscope-1.0.0/evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +94 -0
- evalscope-1.0.0/evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +139 -0
- evalscope-1.0.0/evalscope/benchmarks/musr/musr_adapter.py +43 -0
- evalscope-1.0.0/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +388 -0
- evalscope-1.0.0/evalscope/benchmarks/process_bench/process_bench_adapter.py +170 -0
- evalscope-1.0.0/evalscope/benchmarks/race/race_adapter.py +49 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
- evalscope-0.17.1/evalscope/benchmarks/super_gpqa/five_shot_prompt.txt → evalscope-1.0.0/evalscope/benchmarks/super_gpqa/prompt.py +14 -16
- evalscope-1.0.0/evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +165 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/super_gpqa/utils.py +2 -1
- evalscope-1.0.0/evalscope/benchmarks/tau_bench/generation.py +147 -0
- evalscope-1.0.0/evalscope/benchmarks/tau_bench/tau_bench_adapter.py +168 -0
- evalscope-1.0.0/evalscope/benchmarks/tool_bench/tool_bench_adapter.py +102 -0
- evalscope-1.0.0/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +74 -0
- evalscope-1.0.0/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +92 -0
- evalscope-1.0.0/evalscope/benchmarks/winogrande/winogrande_adapter.py +34 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/cli/cli.py +2 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/cli/start_server.py +6 -3
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/collections/__init__.py +2 -10
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/collections/sampler.py +10 -10
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/collections/schema.py +13 -11
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/config.py +95 -54
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/constants.py +29 -61
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/evaluator/__init__.py +1 -1
- evalscope-1.0.0/evalscope/evaluator/evaluator.py +337 -0
- evalscope-1.0.0/evalscope/filters/__init__.py +2 -0
- evalscope-1.0.0/evalscope/filters/extraction.py +126 -0
- evalscope-1.0.0/evalscope/filters/selection.py +57 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/__init__.py +13 -13
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/llm_judge.py +32 -30
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/math_parser.py +27 -22
- evalscope-1.0.0/evalscope/metrics/metric.py +307 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/metrics.py +22 -18
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models → evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
- {evalscope-0.17.1/evalscope/third_party/thinkbench/tools → evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
- evalscope-1.0.0/evalscope/models/__init__.py +26 -0
- evalscope-1.0.0/evalscope/models/mockllm.py +65 -0
- evalscope-1.0.0/evalscope/models/model_apis.py +47 -0
- evalscope-1.0.0/evalscope/models/modelscope.py +455 -0
- evalscope-1.0.0/evalscope/models/openai_compatible.py +123 -0
- evalscope-1.0.0/evalscope/models/text2image_model.py +124 -0
- evalscope-1.0.0/evalscope/models/utils/openai.py +698 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/benchmark.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/http_client.py +4 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/api/custom_api.py +5 -4
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/api/openai_api.py +11 -9
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/custom.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/flickr8k.py +1 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/line_by_line.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/longalpaca.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/openqa.py +4 -2
- evalscope-1.0.0/evalscope/perf/utils/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/utils/benchmark_util.py +7 -5
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/utils/db_util.py +9 -6
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/utils/local_server.py +8 -3
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/utils/rich_display.py +16 -10
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/report/__init__.py +2 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/report/combinator.py +18 -12
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/report/generator.py +101 -6
- evalscope-0.17.1/evalscope/report/utils.py → evalscope-1.0.0/evalscope/report/report.py +8 -6
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/run.py +26 -44
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/summarizer.py +1 -1
- evalscope-1.0.0/evalscope/third_party/thinkbench/tools/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/utils/__init__.py +21 -2
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/utils/chat_service.py +2 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/utils/deprecation_utils.py +12 -1
- evalscope-1.0.0/evalscope/utils/function_utils.py +29 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/utils/io_utils.py +100 -5
- evalscope-1.0.0/evalscope/utils/json_schema.py +208 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/utils/logger.py +51 -12
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/utils/model_utils.py +10 -7
- evalscope-1.0.0/evalscope/utils/multi_choices.py +271 -0
- evalscope-1.0.0/evalscope/utils/url_utils.py +65 -0
- evalscope-1.0.0/evalscope/version.py +4 -0
- {evalscope-0.17.1 → evalscope-1.0.0/evalscope.egg-info}/PKG-INFO +80 -44
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope.egg-info/SOURCES.txt +60 -42
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope.egg-info/requires.txt +18 -5
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements/aigc.txt +1 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements/dev.txt +1 -1
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements/framework.txt +7 -4
- {evalscope-0.17.1 → evalscope-1.0.0}/setup.cfg +14 -5
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/aigc/test_t2i.py +22 -4
- evalscope-1.0.0/tests/benchmark/test_eval.py +386 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/cli/test_all.py +3 -5
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/cli/test_collection.py +13 -4
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/cli/test_custom.py +22 -15
- evalscope-1.0.0/tests/rag/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/rag/test_clip_benchmark.py +1 -0
- evalscope-1.0.0/tests/vlm/__init__.py +1 -0
- evalscope-0.17.1/evalscope/__init__.py +0 -5
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/base.py +0 -56
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +0 -78
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +0 -58
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +0 -57
- evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/tifa_adapter.py +0 -37
- evalscope-0.17.1/evalscope/benchmarks/aime/aime24_adapter.py +0 -52
- evalscope-0.17.1/evalscope/benchmarks/aime/aime25_adapter.py +0 -52
- evalscope-0.17.1/evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +0 -107
- evalscope-0.17.1/evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope-0.17.1/evalscope/benchmarks/arc/arc_adapter.py +0 -159
- evalscope-0.17.1/evalscope/benchmarks/arena_hard/arena_hard_adapter.py +0 -122
- evalscope-0.17.1/evalscope/benchmarks/bbh/bbh_adapter.py +0 -247
- evalscope-0.17.1/evalscope/benchmarks/benchmark.py +0 -81
- evalscope-0.17.1/evalscope/benchmarks/bfcl/bfcl_adapter.py +0 -237
- evalscope-0.17.1/evalscope/benchmarks/ceval/ceval_adapter.py +0 -238
- evalscope-0.17.1/evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope-0.17.1/evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope-0.17.1/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -213
- evalscope-0.17.1/evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope-0.17.1/evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope-0.17.1/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -125
- evalscope-0.17.1/evalscope/benchmarks/data_adapter.py +0 -528
- evalscope-0.17.1/evalscope/benchmarks/data_collection/data_collection_adapter.py +0 -72
- evalscope-0.17.1/evalscope/benchmarks/docmath/docmath_adapter.py +0 -85
- evalscope-0.17.1/evalscope/benchmarks/filters.py +0 -59
- evalscope-0.17.1/evalscope/benchmarks/frames/frames_adapter.py +0 -91
- evalscope-0.17.1/evalscope/benchmarks/general_mcq/general_mcq_adapter.py +0 -119
- evalscope-0.17.1/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -155
- evalscope-0.17.1/evalscope/benchmarks/gpqa/gpqa_adapter.py +0 -129
- evalscope-0.17.1/evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope-0.17.1/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -156
- evalscope-0.17.1/evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope-0.17.1/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -151
- evalscope-0.17.1/evalscope/benchmarks/hle/hle_adapter.py +0 -118
- evalscope-0.17.1/evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope-0.17.1/evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -93
- evalscope-0.17.1/evalscope/benchmarks/ifeval/ifeval_adapter.py +0 -54
- evalscope-0.17.1/evalscope/benchmarks/iquiz/iquiz_adapter.py +0 -70
- evalscope-0.17.1/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +0 -88
- evalscope-0.17.1/evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +0 -82
- evalscope-0.17.1/evalscope/benchmarks/math_500/math_500_adapter.py +0 -58
- evalscope-0.17.1/evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope-0.17.1/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -280
- evalscope-0.17.1/evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope-0.17.1/evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +0 -113
- evalscope-0.17.1/evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +0 -185
- evalscope-0.17.1/evalscope/benchmarks/musr/musr_adapter.py +0 -74
- evalscope-0.17.1/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +0 -348
- evalscope-0.17.1/evalscope/benchmarks/process_bench/critique_template.txt +0 -13
- evalscope-0.17.1/evalscope/benchmarks/process_bench/process_bench_adapter.py +0 -102
- evalscope-0.17.1/evalscope/benchmarks/race/race.py +0 -104
- evalscope-0.17.1/evalscope/benchmarks/race/race_adapter.py +0 -135
- evalscope-0.17.1/evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope-0.17.1/evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +0 -209
- evalscope-0.17.1/evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
- evalscope-0.17.1/evalscope/benchmarks/tau_bench/tau_bench_adapter.py +0 -110
- evalscope-0.17.1/evalscope/benchmarks/tool_bench/tool_bench_adapter.py +0 -81
- evalscope-0.17.1/evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope-0.17.1/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -142
- evalscope-0.17.1/evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope-0.17.1/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +0 -287
- evalscope-0.17.1/evalscope/benchmarks/utils.py +0 -60
- evalscope-0.17.1/evalscope/benchmarks/winogrande/winogrande_adapter.py +0 -60
- evalscope-0.17.1/evalscope/collections/evaluator.py +0 -375
- evalscope-0.17.1/evalscope/evaluator/evaluator.py +0 -483
- evalscope-0.17.1/evalscope/metrics/completion_parsers.py +0 -227
- evalscope-0.17.1/evalscope/metrics/named_metrics.py +0 -55
- evalscope-0.17.1/evalscope/metrics/t2v_metrics/__init__.py +0 -52
- evalscope-0.17.1/evalscope/models/__init__.py +0 -49
- evalscope-0.17.1/evalscope/models/adapters/__init__.py +0 -14
- evalscope-0.17.1/evalscope/models/adapters/base_adapter.py +0 -84
- evalscope-0.17.1/evalscope/models/adapters/bfcl_adapter.py +0 -246
- evalscope-0.17.1/evalscope/models/adapters/chat_adapter.py +0 -207
- evalscope-0.17.1/evalscope/models/adapters/choice_adapter.py +0 -222
- evalscope-0.17.1/evalscope/models/adapters/custom_adapter.py +0 -71
- evalscope-0.17.1/evalscope/models/adapters/server_adapter.py +0 -236
- evalscope-0.17.1/evalscope/models/adapters/t2i_adapter.py +0 -79
- evalscope-0.17.1/evalscope/models/adapters/tau_bench_adapter.py +0 -189
- evalscope-0.17.1/evalscope/models/custom/__init__.py +0 -4
- evalscope-0.17.1/evalscope/models/custom/custom_model.py +0 -50
- evalscope-0.17.1/evalscope/models/custom/dummy_model.py +0 -99
- evalscope-0.17.1/evalscope/models/local_model.py +0 -128
- evalscope-0.17.1/evalscope/models/register.py +0 -41
- evalscope-0.17.1/evalscope/version.py +0 -4
- evalscope-0.17.1/tests/cli/test_run.py +0 -489
- {evalscope-0.17.1 → evalscope-1.0.0}/LICENSE +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/MANIFEST.in +0 -0
- {evalscope-0.17.1/evalscope/backend → evalscope-1.0.0/evalscope/api}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/app.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/arguments.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/constants.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/ui/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/ui/sidebar.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/ui/visualization.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/app/utils/localization.py +0 -0
- {evalscope-0.17.1/evalscope/backend/rag_eval/clip_benchmark/tasks → evalscope-1.0.0/evalscope/backend}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/opencompass/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/opencompass/tasks/eval_datasets.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/backend_manager.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/clip_benchmark/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/clip_benchmark/arguments.py +0 -0
- {evalscope-0.17.1/evalscope/backend/rag_eval/utils → evalscope-1.0.0/evalscope/backend/rag_eval/clip_benchmark/tasks}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/clip_benchmark/utils/webdataset_convert.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/clip_benchmark/utils/webdatasets.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/arguments.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/task_template.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/tasks/STS.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/ragas/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/ragas/arguments.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/ragas/tasks/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/aigc → evalscope-1.0.0/evalscope/backend/rag_eval/utils}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/utils/clip.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/rag_eval/utils/tools.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/backend/vlm_eval_kit/backend_manager.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/aigc/t2i → evalscope-1.0.0/evalscope/benchmarks/aigc}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/aime → evalscope-1.0.0/evalscope/benchmarks/aigc/i2i}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/alpaca_eval → evalscope-1.0.0/evalscope/benchmarks/aigc/t2i}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/arena_hard → evalscope-1.0.0/evalscope/benchmarks/aime}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/bfcl → evalscope-1.0.0/evalscope/benchmarks/alpaca_eval}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/arc/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/chinese_simple_qa → evalscope-1.0.0/evalscope/benchmarks/arena_hard}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/data_collection → evalscope-1.0.0/evalscope/benchmarks/bfcl}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/ceval/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/docmath → evalscope-1.0.0/evalscope/benchmarks/chinese_simple_qa}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/competition_math/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/drop → evalscope-1.0.0/evalscope/benchmarks/data_collection}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/frames → evalscope-1.0.0/evalscope/benchmarks/docmath}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/general_arena → evalscope-1.0.0/evalscope/benchmarks/drop}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/drop/utils.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/general_mcq → evalscope-1.0.0/evalscope/benchmarks/frames}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/frames/utils.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/gpqa → evalscope-1.0.0/evalscope/benchmarks/general_arena}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/hle → evalscope-1.0.0/evalscope/benchmarks/general_mcq}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/general_qa/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/ifeval → evalscope-1.0.0/evalscope/benchmarks/gpqa}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/iquiz → evalscope-1.0.0/evalscope/benchmarks/hle}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/humaneval/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/live_code_bench → evalscope-1.0.0/evalscope/benchmarks/ifeval}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/ifeval/instructions_util.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/maritime_bench → evalscope-1.0.0/evalscope/benchmarks/iquiz}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/math_500 → evalscope-1.0.0/evalscope/benchmarks/live_code_bench}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/live_code_bench/extract_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/live_code_bench/pass_k_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/live_code_bench/prompts.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/mmlu_pro → evalscope-1.0.0/evalscope/benchmarks/maritime_bench}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/mmlu_redux → evalscope-1.0.0/evalscope/benchmarks/math_500}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/mmlu/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/musr → evalscope-1.0.0/evalscope/benchmarks/mmlu_pro}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/needle_haystack → evalscope-1.0.0/evalscope/benchmarks/mmlu_redux}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/process_bench → evalscope-1.0.0/evalscope/benchmarks/musr}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/simple_qa → evalscope-1.0.0/evalscope/benchmarks/needle_haystack}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/needle_haystack/utils.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/super_gpqa → evalscope-1.0.0/evalscope/benchmarks/process_bench}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/race/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/tau_bench → evalscope-1.0.0/evalscope/benchmarks/simple_qa}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/tool_bench → evalscope-1.0.0/evalscope/benchmarks/super_gpqa}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/benchmarks/winogrande → evalscope-1.0.0/evalscope/benchmarks/tau_bench}/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models → evalscope-1.0.0/evalscope/benchmarks/tool_bench}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/tool_bench/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/trivia_qa/samples.jsonl +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model → evalscope-1.0.0/evalscope/benchmarks/winogrande}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/cli/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/cli/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/cli/start_app.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/cli/start_eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/cli/start_perf.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/rouge_metric.py +0 -0
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward → evalscope-1.0.0/evalscope/metrics/t2v_metrics}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/clipscore.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/constants.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/itmscore.py +0 -0
- {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5 → evalscope-1.0.0/evalscope/metrics/t2v_metrics/models}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/perf → evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/model.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +0 -0
- {evalscope-0.17.1/evalscope/perf/utils → evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/score.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/metrics/t2v_metrics/vqascore.py +0 -0
- {evalscope-0.17.1/tests/rag → evalscope-1.0.0/evalscope/perf}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/arguments.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/main.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/api/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/api/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/api/dashscope_api.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/api/default_api.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/base.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/random_dataset.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/random_vl_dataset.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/datasets/speed_benchmark.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/plugin/registry.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/utils/analysis_result.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/utils/handler.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/perf/utils/log_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/README.md +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/default_task.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/default_task.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/infer.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/resources/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/tools/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/tools/openai_api.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/longbench_write/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/thinkbench/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/thinkbench/eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/thinkbench/infer.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/thinkbench/resources/critique_template.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/thinkbench/resources/reformat_template.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/thinkbench/tools/llm.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/thinkbench/tools/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/README.md +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/config_default.json +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/config_default.yaml +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/infer.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/llm/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/requirements.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/utils/argument_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope/utils/import_utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope.egg-info/dependency_links.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope.egg-info/entry_points.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope.egg-info/not-zip-safe +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/evalscope.egg-info/top_level.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements/app.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements/docs.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements/opencompass.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements/perf.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements/rag.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements/vlmeval.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/requirements.txt +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/setup.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/aigc/__init__.py +0 -0
- {evalscope-0.17.1/tests/cli → evalscope-1.0.0/tests/benchmark}/__init__.py +0 -0
- {evalscope-0.17.1/tests/perf → evalscope-1.0.0/tests/cli}/__init__.py +0 -0
- {evalscope-0.17.1/tests/swift → evalscope-1.0.0/tests/perf}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/perf/test_perf.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/rag/test_mteb.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/rag/test_ragas.py +0 -0
- {evalscope-0.17.1/tests/vlm → evalscope-1.0.0/tests/swift}/__init__.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/swift/test_run_swift_eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/swift/test_run_swift_vlm_eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/swift/test_run_swift_vlm_jugde_eval.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/test_run_all.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/utils.py +0 -0
- {evalscope-0.17.1 → evalscope-1.0.0}/tests/vlm/test_vlmeval.py +0 -0

PKG-INFO / README diff excerpt:

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: evalscope
-Version: 0.17.1
+Version: 1.0.0
 Summary: EvalScope: Lightweight LLMs Evaluation Framework
 Home-page: https://github.com/modelscope/evalscope
 Author: ModelScope team
```
```diff
@@ -57,9 +57,9 @@ License-File: LICENSE
 - [📝 Introduction](#-introduction)
 - [☎ User Groups](#-user-groups)
 - [🎉 News](#-news)
-- [🛠️
-- [Method 1
-- [Method 2
+- [🛠️ Environment Setup](#️-environment-setup)
+- [Method 1. Install via pip](#method-1-install-via-pip)
+- [Method 2. Install from source](#method-2-install-from-source)
 - [🚀 Quick Start](#-quick-start)
 - [Method 1. Using Command Line](#method-1-using-command-line)
 - [Method 2. Using Python Code](#method-2-using-python-code)
```
```diff
@@ -140,6 +140,13 @@ Please scan the QR code below to join our community groups:
 
 
 ## 🎉 News
+
+> [!IMPORTANT]
+> **Version 1.0 Refactoring**
+>
+> Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
+
+- 🔥 **[2025.08.22]** Version 1.0 Refactoring.
 - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
 - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
 - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
```
@@ -150,12 +157,12 @@ Please scan the QR code below to join our community groups:
|
|
|
150
157
|
- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
|
|
151
158
|
- 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
|
|
152
159
|
- 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
|
|
160
|
+
<details><summary>More</summary>
|
|
161
|
+
|
|
153
162
|
- 🔥 **[2025.04.29]** Added Qwen3 Evaluation Best Practices, [welcome to read 📖](https://evalscope.readthedocs.io/en/latest/best_practice/qwen3.html)
|
|
154
163
|
- 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
|
|
155
164
|
- 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
|
|
156
165
|
- 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
|
|
157
|
-
<details><summary>More</summary>
|
|
158
|
-
|
|
159
166
|
- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
|
|
160
167
|
- 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
|
|
161
168
|
- 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
|
|
@@ -188,58 +195,87 @@ Please scan the QR code below to join our community groups:
|
|
|
188
195
|
|
|
189
196
|
</details>
|
|
190
197
|
|
|
191
|
-
## 🛠️
|
|
192
|
-
|
|
193
|
-
|
|
198
|
+
## 🛠️ Environment Setup
|
|
199
|
+
|
|
200
|
+
### Method 1. Install via pip
|
|
201
|
+
|
|
202
|
+
We recommend using conda to manage your environment and pip to install dependencies. This allows you to use the latest evalscope PyPI package.
|
|
194
203
|
|
|
195
204
|
1. Create a conda environment (optional)
|
|
205
|
+
```shell
|
|
206
|
+
# Python 3.10 is recommended
|
|
207
|
+
conda create -n evalscope python=3.10
|
|
208
|
+
|
|
209
|
+
# Activate the conda environment
|
|
210
|
+
conda activate evalscope
|
|
211
|
+
```
|
|
212
|
+
2. Install dependencies via pip
|
|
213
|
+
```shell
|
|
214
|
+
pip install evalscope
|
|
215
|
+
```
|
|
216
|
+
3. Install additional dependencies (optional)
|
|
217
|
+
- To use model service inference benchmarking features, install the perf dependency:
|
|
196
218
|
```shell
|
|
197
|
-
|
|
198
|
-
conda create -n evalscope python=3.10
|
|
199
|
-
# Activate the conda environment
|
|
200
|
-
conda activate evalscope
|
|
219
|
+
pip install 'evalscope[perf]'
|
|
201
220
|
```
|
|
202
|
-
|
|
203
|
-
2. Install dependencies using pip
|
|
221
|
+
- To use visualization features, install the app dependency:
|
|
204
222
|
```shell
|
|
205
|
-
pip install evalscope
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
pip install 'evalscope[
|
|
210
|
-
pip install 'evalscope[
|
|
211
|
-
pip install 'evalscope[
|
|
212
|
-
|
|
223
|
+
pip install 'evalscope[app]'
|
|
224
|
+
```
|
|
225
|
+
- If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
|
|
226
|
+
```shell
|
|
227
|
+
pip install 'evalscope[opencompass]'
|
|
228
|
+
pip install 'evalscope[vlmeval]'
|
|
229
|
+
pip install 'evalscope[rag]'
|
|
230
|
+
```
|
|
231
|
+
- To install all dependencies:
|
|
232
|
+
```shell
|
|
233
|
+
pip install 'evalscope[all]'
|
|
213
234
|
```
|
|
214
235
|
|
|
215
|
-
> [!
|
|
216
|
-
>
|
|
236
|
+
> [!NOTE]
|
|
237
|
+
> The project has been renamed to `evalscope`. For version `v0.4.3` or earlier, you can install it with:
|
|
217
238
|
> ```shell
|
|
218
|
-
>
|
|
239
|
+
> pip install llmuses<=0.4.3
|
|
219
240
|
> ```
|
|
220
|
-
>
|
|
221
|
-
> ```
|
|
241
|
+
> Then, import related dependencies using `llmuses`:
|
|
242
|
+
> ```python
|
|
222
243
|
> from llmuses import ...
|
|
223
244
|
> ```
|
|
224
245
|
|
|
225
|
-
### Method 2
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
git clone https://github.com/modelscope/evalscope.git
|
|
229
|
-
```
|
|
246
|
+
### Method 2. Install from source
|
|
247
|
+
|
|
248
|
+
Installing from source allows you to use the latest code and makes it easier for further development and debugging.
|
|
230
249
|
|
|
250
|
+
1. Clone the source code
|
|
251
|
+
```shell
|
|
252
|
+
git clone https://github.com/modelscope/evalscope.git
|
|
253
|
+
```
|
|
231
254
|
2. Install dependencies
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
255
|
+
```shell
|
|
256
|
+
cd evalscope/
|
|
257
|
+
|
|
258
|
+
pip install -e .
|
|
259
|
+
```
|
|
260
|
+
3. Install additional dependencies
|
|
261
|
+
- To use model service inference benchmarking features, install the perf dependency:
|
|
262
|
+
```shell
|
|
263
|
+
pip install '.[perf]'
|
|
264
|
+
```
|
|
265
|
+
- To use visualization features, install the app dependency:
|
|
266
|
+
```shell
|
|
267
|
+
pip install '.[app]'
|
|
268
|
+
```
|
|
269
|
+
- If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
|
|
270
|
+
```shell
|
|
271
|
+
pip install '.[opencompass]'
|
|
272
|
+
pip install '.[vlmeval]'
|
|
273
|
+
pip install '.[rag]'
|
|
274
|
+
```
|
|
275
|
+
- To install all dependencies:
|
|
276
|
+
```shell
|
|
277
|
+
pip install '.[all]'
|
|
278
|
+
```
|
|
243
279
|
|
|
244
280
|
|
|
245
281
|
## 🚀 Quick Start
|
|
--- README.md
+++ README.md
@@ -28,9 +28,9 @@
 - [📝 Introduction](#-introduction)
 - [☎ User Groups](#-user-groups)
 - [🎉 News](#-news)
-- [🛠️
-- [Method 1
-- [Method 2
+- [🛠️ Environment Setup](#️-environment-setup)
+- [Method 1. Install via pip](#method-1-install-via-pip)
+- [Method 2. Install from source](#method-2-install-from-source)
 - [🚀 Quick Start](#-quick-start)
 - [Method 1. Using Command Line](#method-1-using-command-line)
 - [Method 2. Using Python Code](#method-2-using-python-code)
@@ -111,6 +111,13 @@ Please scan the QR code below to join our community groups:
 
 
 ## 🎉 News
+
+> [!IMPORTANT]
+> **Version 1.0 Refactoring**
+>
+> Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
+
+- 🔥 **[2025.08.22]** Version 1.0 Refactoring.
 - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
 - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
 - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
@@ -121,12 +128,12 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
 - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
 - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
+<details><summary>More</summary>
+
 - 🔥 **[2025.04.29]** Added Qwen3 Evaluation Best Practices, [welcome to read 📖](https://evalscope.readthedocs.io/en/latest/best_practice/qwen3.html)
 - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
 - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
 - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
-<details><summary>More</summary>
-
 - 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
 - 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
 - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
@@ -159,58 +166,87 @@ Please scan the QR code below to join our community groups:
 
 </details>
 
-## 🛠️
-
-
+## 🛠️ Environment Setup
+
+### Method 1. Install via pip
+
+We recommend using conda to manage your environment and pip to install dependencies. This allows you to use the latest evalscope PyPI package.
 
 1. Create a conda environment (optional)
+```shell
+# Python 3.10 is recommended
+conda create -n evalscope python=3.10
+
+# Activate the conda environment
+conda activate evalscope
+```
+2. Install dependencies via pip
+```shell
+pip install evalscope
+```
+3. Install additional dependencies (optional)
+- To use model service inference benchmarking features, install the perf dependency:
 ```shell
-
-conda create -n evalscope python=3.10
-# Activate the conda environment
-conda activate evalscope
+pip install 'evalscope[perf]'
 ```
-
-2. Install dependencies using pip
+- To use visualization features, install the app dependency:
 ```shell
-pip install evalscope
-
-
-
-pip install 'evalscope[
-pip install 'evalscope[
-pip install 'evalscope[
-
+pip install 'evalscope[app]'
+```
+- If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
+```shell
+pip install 'evalscope[opencompass]'
+pip install 'evalscope[vlmeval]'
+pip install 'evalscope[rag]'
+```
+- To install all dependencies:
+```shell
+pip install 'evalscope[all]'
 ```
 
-> [!
->
+> [!NOTE]
+> The project has been renamed to `evalscope`. For version `v0.4.3` or earlier, you can install it with:
 > ```shell
->
+> pip install llmuses<=0.4.3
 > ```
->
-> ```
+> Then, import related dependencies using `llmuses`:
+> ```python
 > from llmuses import ...
 > ```
 
-### Method 2
-
-
-git clone https://github.com/modelscope/evalscope.git
-```
+### Method 2. Install from source
+
+Installing from source allows you to use the latest code and makes it easier for further development and debugging.
 
+1. Clone the source code
+```shell
+git clone https://github.com/modelscope/evalscope.git
+```
 2. Install dependencies
-
-
-
-
-
-
-
-
-
-
-
+```shell
+cd evalscope/
+
+pip install -e .
+```
+3. Install additional dependencies
+- To use model service inference benchmarking features, install the perf dependency:
+```shell
+pip install '.[perf]'
+```
+- To use visualization features, install the app dependency:
+```shell
+pip install '.[app]'
+```
+- If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
+```shell
+pip install '.[opencompass]'
+pip install '.[vlmeval]'
+pip install '.[rag]'
+```
+- To install all dependencies:
+```shell
+pip install '.[all]'
+```
 
 
 ## 🚀 Quick Start
--- /dev/null
+++ evalscope/__init__.py
@@ -0,0 +1,8 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+from evalscope.benchmarks import * # registered benchmarks
+from evalscope.config import TaskConfig
+from evalscope.filters import extraction, selection # registered filters
+from evalscope.metrics import metric # registered metrics
+from evalscope.models import model_apis # need for register model apis
+from evalscope.run import run_task
+from .version import __release_datetime__, __version__
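
The new top-level `evalscope/__init__.py` shown above wires the 1.0 architecture together: importing the package registers benchmarks, filters, metrics, and model APIs, and re-exports the public entry points `TaskConfig` and `run_task`. Below is a minimal usage sketch of those two entry points; the model identifier, dataset name, and sample limit are illustrative assumptions rather than values taken from this diff.

```python
# Minimal sketch using the entry points re-exported by evalscope/__init__.py.
# The model ID, dataset name, and limit below are illustrative assumptions;
# consult the project's Quick Start documentation for supported values.
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='Qwen/Qwen2.5-0.5B-Instruct',  # assumed example model identifier
    datasets=['gsm8k'],                  # assumed example registered benchmark
    limit=5,                             # evaluate only a few samples as a smoke test
)

run_task(task_cfg=task_cfg)
```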