evalscope 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/api/benchmark/__init__.py +9 -1
- evalscope/api/benchmark/adapters/__init__.py +4 -0
- evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +75 -4
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
- evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
- evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- evalscope/api/benchmark/benchmark.py +85 -2
- evalscope/api/benchmark/meta.py +10 -1
- evalscope/api/dataset/dataset.py +27 -6
- evalscope/api/dataset/loader.py +8 -3
- evalscope/api/evaluator/cache.py +31 -4
- evalscope/api/evaluator/evaluator.py +5 -0
- evalscope/api/evaluator/state.py +17 -1
- evalscope/api/messages/__init__.py +1 -0
- evalscope/api/messages/chat_message.py +52 -2
- evalscope/api/metric/__init__.py +1 -1
- evalscope/api/metric/metric.py +6 -1
- evalscope/api/metric/scorer.py +15 -7
- evalscope/api/mixin/__init__.py +1 -1
- evalscope/api/mixin/llm_judge_mixin.py +2 -0
- evalscope/api/mixin/sandbox_mixin.py +182 -0
- evalscope/api/model/generate_config.py +10 -6
- evalscope/api/model/model.py +5 -2
- evalscope/api/tool/tool_info.py +1 -1
- evalscope/app/app.py +3 -0
- evalscope/app/ui/multi_model.py +6 -1
- evalscope/app/ui/single_model.py +11 -5
- evalscope/app/utils/data_utils.py +8 -7
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/text_utils.py +14 -12
- evalscope/app/utils/visualization.py +2 -2
- evalscope/arguments.py +8 -4
- evalscope/backend/opencompass/backend_manager.py +0 -2
- evalscope/backend/rag_eval/utils/embedding.py +9 -1
- evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
- evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
- evalscope/benchmarks/aime/aime24_adapter.py +5 -0
- evalscope/benchmarks/aime/aime25_adapter.py +136 -1
- evalscope/benchmarks/aime/grader.py +307 -0
- evalscope/benchmarks/aime/math_normalize.py +189 -0
- evalscope/benchmarks/amc/amc_adapter.py +51 -0
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
- evalscope/benchmarks/bfcl/{bfcl_adapter.py → v3/bfcl_v3_adapter.py} +131 -19
- evalscope/benchmarks/bfcl/{generation.py → v3/generation.py} +9 -9
- evalscope/benchmarks/bfcl/v3/utils.py +23 -0
- evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
- evalscope/benchmarks/bfcl/v4/utils.py +410 -0
- evalscope/benchmarks/biomix_qa/__init__.py +0 -0
- evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
- evalscope/benchmarks/blink/__init__.py +0 -0
- evalscope/benchmarks/blink/blink_adapter.py +61 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
- evalscope/benchmarks/chartqa/__init__.py +0 -0
- evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
- evalscope/benchmarks/chartqa/utils.py +38 -0
- evalscope/benchmarks/coin_flip/__init__.py +0 -0
- evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
- evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
- evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
- evalscope/benchmarks/competition_math/competition_math_adapter.py +5 -0
- evalscope/benchmarks/data_collection/data_collection_adapter.py +24 -19
- evalscope/benchmarks/docvqa/__init__.py +0 -0
- evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
- evalscope/benchmarks/drivelology/__init__.py +0 -0
- evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
- evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
- evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
- evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
- evalscope/benchmarks/drop/drop_adapter.py +15 -44
- evalscope/benchmarks/drop/utils.py +97 -0
- evalscope/benchmarks/frames/frames_adapter.py +2 -1
- evalscope/benchmarks/general_arena/general_arena_adapter.py +7 -2
- evalscope/benchmarks/general_arena/utils.py +2 -1
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
- evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +25 -9
- evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
- evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
- evalscope/benchmarks/halu_eval/__init__.py +0 -0
- evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
- evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
- evalscope/benchmarks/healthbench/__init__.py +0 -0
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/hle/hle_adapter.py +3 -2
- evalscope/benchmarks/humaneval/humaneval_adapter.py +24 -52
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/image_edit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/infovqa/__init__.py +0 -0
- evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +66 -54
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/logi_qa/__int__.py +0 -0
- evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +5 -1
- evalscope/benchmarks/math_qa/__init__.py +0 -0
- evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
- evalscope/benchmarks/math_verse/__init__.py +0 -0
- evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
- evalscope/benchmarks/math_vision/__init__.py +0 -0
- evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
- evalscope/benchmarks/med_mcqa/__init__.py +0 -0
- evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +1 -1
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
- evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/music_trivia/__init__.py +0 -0
- evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +7 -6
- evalscope/benchmarks/ner/__init__.py +0 -0
- evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
- evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
- evalscope/benchmarks/ner/copious_adapter.py +85 -0
- evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
- evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
- evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
- evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
- evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
- evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
- evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
- evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
- evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
- evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
- evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
- evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
- evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
- evalscope/benchmarks/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
- evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
- evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
- evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
- evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
- evalscope/benchmarks/piqa/__init__.py +0 -0
- evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
- evalscope/benchmarks/poly_math/__init__.py +0 -0
- evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
- evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
- evalscope/benchmarks/pope/__init__.py +0 -0
- evalscope/benchmarks/pope/pope_adapter.py +112 -0
- evalscope/benchmarks/process_bench/process_bench_adapter.py +1 -0
- evalscope/benchmarks/pumed_qa/__init__.py +0 -0
- evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
- evalscope/benchmarks/qasc/__init__.py +0 -0
- evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/sciq/__init__.py +0 -0
- evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
- evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
- evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +1 -1
- evalscope/benchmarks/simple_vqa/__init__.py +0 -0
- evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
- evalscope/benchmarks/siqa/__init__.py +0 -0
- evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
- evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
- evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
- evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/{generation.py → tau_bench/generation.py} +1 -1
- evalscope/benchmarks/tau_bench/{tau_bench_adapter.py → tau_bench/tau_bench_adapter.py} +29 -29
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +3 -3
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
- evalscope/benchmarks/visu_logic/__init__.py +0 -0
- evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
- evalscope/benchmarks/wmt/__init__.py +0 -0
- evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
- evalscope/benchmarks/zerobench/__init__.py +0 -0
- evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
- evalscope/cli/start_app.py +7 -1
- evalscope/cli/start_perf.py +7 -1
- evalscope/config.py +103 -18
- evalscope/constants.py +18 -0
- evalscope/evaluator/evaluator.py +138 -82
- evalscope/metrics/bert_score/__init__.py +0 -0
- evalscope/metrics/bert_score/scorer.py +338 -0
- evalscope/metrics/bert_score/utils.py +697 -0
- evalscope/metrics/llm_judge.py +19 -7
- evalscope/metrics/math_parser.py +14 -0
- evalscope/metrics/metric.py +317 -13
- evalscope/metrics/metrics.py +37 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/model_apis.py +22 -0
- evalscope/models/openai_compatible.py +21 -0
- evalscope/models/text2image_model.py +2 -2
- evalscope/models/utils/openai.py +16 -6
- evalscope/perf/arguments.py +26 -4
- evalscope/perf/benchmark.py +76 -89
- evalscope/perf/http_client.py +31 -16
- evalscope/perf/main.py +15 -2
- evalscope/perf/plugin/api/base.py +9 -7
- evalscope/perf/plugin/api/custom_api.py +13 -58
- evalscope/perf/plugin/api/default_api.py +188 -79
- evalscope/perf/plugin/api/openai_api.py +85 -20
- evalscope/perf/plugin/datasets/base.py +21 -0
- evalscope/perf/plugin/datasets/custom.py +2 -3
- evalscope/perf/plugin/datasets/flickr8k.py +2 -2
- evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
- evalscope/perf/plugin/datasets/line_by_line.py +2 -3
- evalscope/perf/plugin/datasets/longalpaca.py +2 -3
- evalscope/perf/plugin/datasets/openqa.py +2 -4
- evalscope/perf/plugin/datasets/random_dataset.py +1 -3
- evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
- evalscope/perf/utils/benchmark_util.py +43 -27
- evalscope/perf/utils/db_util.py +14 -19
- evalscope/perf/utils/local_server.py +3 -44
- evalscope/perf/utils/log_utils.py +21 -6
- evalscope/report/__init__.py +13 -3
- evalscope/report/combinator.py +91 -20
- evalscope/report/generator.py +8 -87
- evalscope/report/report.py +8 -4
- evalscope/run.py +13 -5
- evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- evalscope/utils/argument_utils.py +1 -1
- evalscope/utils/chat_service.py +1 -1
- evalscope/utils/function_utils.py +249 -12
- evalscope/utils/import_utils.py +73 -1
- evalscope/utils/io_utils.py +132 -7
- evalscope/utils/json_schema.py +25 -2
- evalscope/utils/logger.py +69 -18
- evalscope/utils/model_utils.py +4 -3
- evalscope/utils/multi_choices.py +39 -7
- evalscope/utils/ner.py +377 -0
- evalscope/version.py +2 -2
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/METADATA +252 -408
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/RECORD +290 -154
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
- evalscope/api/mixin/dataset_mixin.py +0 -105
- evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
- tests/__init__.py +0 -1
- tests/aigc/__init__.py +0 -1
- tests/aigc/test_t2i.py +0 -142
- tests/benchmark/__init__.py +0 -1
- tests/benchmark/test_eval.py +0 -386
- tests/cli/__init__.py +0 -1
- tests/cli/test_all.py +0 -229
- tests/cli/test_collection.py +0 -96
- tests/cli/test_custom.py +0 -268
- tests/perf/__init__.py +0 -1
- tests/perf/test_perf.py +0 -176
- tests/rag/test_clip_benchmark.py +0 -90
- tests/rag/test_mteb.py +0 -213
- tests/rag/test_ragas.py +0 -128
- tests/swift/__init__.py +0 -1
- tests/swift/test_run_swift_eval.py +0 -146
- tests/swift/test_run_swift_vlm_eval.py +0 -128
- tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
- tests/test_run_all.py +0 -12
- tests/utils.py +0 -13
- tests/vlm/__init__.py +0 -1
- tests/vlm/test_vlmeval.py +0 -102
- /evalscope/benchmarks/{aigc → aa_lcr}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/i2i → ai2d}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/t2i → amc}/__init__.py +0 -0
- {tests/rag → evalscope/benchmarks/bfcl/v3}/__init__.py +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
````diff
--- evalscope-1.0.0.dist-info/METADATA
+++ evalscope-1.2.0.dist-info/METADATA
@@ -1,32 +1,30 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: evalscope
-Version: 1.0.0
+Version: 1.2.0
 Summary: EvalScope: Lightweight LLMs Evaluation Framework
-Home-page: https://github.com/modelscope/evalscope
 Author: ModelScope team
 Author-email: contact@modelscope.cn
 License: Apache License 2.0
+Project-URL: Homepage, https://github.com/modelscope/evalscope
 Keywords: python,llm,evaluation
 Classifier: Development Status :: 4 - Beta
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-
+Classifier: License :: OSI Approved :: Apache Software License
+Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: accelerate
 Requires-Dist: colorlog
 Requires-Dist: datasets==3.6.0
-Requires-Dist:
+Requires-Dist: docstring_parser
 Requires-Dist: dotenv
-Requires-Dist: immutabledict
 Requires-Dist: jieba
 Requires-Dist: jsonlines
 Requires-Dist: langdetect
-Requires-Dist:
+Requires-Dist: latex2sympy2_extended[antlr4_9_3]
 Requires-Dist: matplotlib
 Requires-Dist: modelscope[framework]>=1.27
 Requires-Dist: nltk>=3.9
@@ -34,8 +32,8 @@ Requires-Dist: openai
 Requires-Dist: overrides
 Requires-Dist: pandas
 Requires-Dist: pillow
-Requires-Dist: pyarrow
 Requires-Dist: pydantic
+Requires-Dist: pylatexenc
 Requires-Dist: pyyaml>=5.1
 Requires-Dist: requests
 Requires-Dist: rich
@@ -46,52 +44,57 @@ Requires-Dist: scikit-learn
 Requires-Dist: seaborn
 Requires-Dist: sympy
 Requires-Dist: tabulate
-Requires-Dist: torch
 Requires-Dist: tqdm
 Requires-Dist: transformers>=4.33
 Requires-Dist: word2number
+Provides-Extra: opencompass
+Requires-Dist: ms-opencompass>=0.1.6; extra == "opencompass"
+Provides-Extra: vlmeval
+Requires-Dist: ms-vlmeval>=0.0.17; extra == "vlmeval"
+Provides-Extra: rag
+Requires-Dist: langchain<0.4.0,>=0.3.0; extra == "rag"
+Requires-Dist: langchain-community<0.4.0,>=0.3.0; extra == "rag"
+Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "rag"
+Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "rag"
+Requires-Dist: mteb==1.38.20; extra == "rag"
+Requires-Dist: ragas==0.2.14; extra == "rag"
+Requires-Dist: torch; extra == "rag"
+Requires-Dist: webdataset>0.2.0; extra == "rag"
+Provides-Extra: perf
+Requires-Dist: aiohttp; extra == "perf"
+Requires-Dist: fastapi; extra == "perf"
+Requires-Dist: jinja2; extra == "perf"
+Requires-Dist: numpy; extra == "perf"
+Requires-Dist: rich; extra == "perf"
+Requires-Dist: sse_starlette; extra == "perf"
+Requires-Dist: transformers; extra == "perf"
+Requires-Dist: uvicorn; extra == "perf"
+Provides-Extra: app
+Requires-Dist: gradio==5.4.0; extra == "app"
+Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "app"
 Provides-Extra: aigc
 Requires-Dist: diffusers; extra == "aigc"
 Requires-Dist: iopath; extra == "aigc"
 Requires-Dist: omegaconf; extra == "aigc"
-Requires-Dist:
+Requires-Dist: open_clip_torch; extra == "aigc"
 Requires-Dist: opencv-python; extra == "aigc"
 Requires-Dist: peft>=0.17; extra == "aigc"
+Requires-Dist: torch; extra == "aigc"
 Requires-Dist: torchvision; extra == "aigc"
+Provides-Extra: sandbox
+Requires-Dist: ms-enclave[docker]; extra == "sandbox"
+Provides-Extra: dev
+Requires-Dist: pytest; extra == "dev"
+Requires-Dist: pytest-cov; extra == "dev"
+Requires-Dist: python-dotenv; extra == "dev"
+Provides-Extra: docs
+Requires-Dist: docutils>=0.16.0; extra == "docs"
+Requires-Dist: myst_parser; extra == "docs"
+Requires-Dist: recommonmark; extra == "docs"
+Requires-Dist: sphinx>=5.3.0; extra == "docs"
+Requires-Dist: sphinx-design; extra == "docs"
+Requires-Dist: sphinxawesome-theme; extra == "docs"
 Provides-Extra: all
-Requires-Dist: accelerate; extra == "all"
-Requires-Dist: colorlog; extra == "all"
-Requires-Dist: datasets==3.6.0; extra == "all"
-Requires-Dist: docstring-parser; extra == "all"
-Requires-Dist: dotenv; extra == "all"
-Requires-Dist: immutabledict; extra == "all"
-Requires-Dist: jieba; extra == "all"
-Requires-Dist: jsonlines; extra == "all"
-Requires-Dist: langdetect; extra == "all"
-Requires-Dist: latex2sympy2-extended; extra == "all"
-Requires-Dist: matplotlib; extra == "all"
-Requires-Dist: modelscope[framework]>=1.27; extra == "all"
-Requires-Dist: nltk>=3.9; extra == "all"
-Requires-Dist: openai; extra == "all"
-Requires-Dist: overrides; extra == "all"
-Requires-Dist: pandas; extra == "all"
-Requires-Dist: pillow; extra == "all"
-Requires-Dist: pyarrow; extra == "all"
-Requires-Dist: pydantic; extra == "all"
-Requires-Dist: pyyaml>=5.1; extra == "all"
-Requires-Dist: requests; extra == "all"
-Requires-Dist: rich; extra == "all"
-Requires-Dist: rouge-chinese; extra == "all"
-Requires-Dist: rouge-score>=0.1.0; extra == "all"
-Requires-Dist: sacrebleu; extra == "all"
-Requires-Dist: scikit-learn; extra == "all"
-Requires-Dist: seaborn; extra == "all"
-Requires-Dist: sympy; extra == "all"
-Requires-Dist: tabulate; extra == "all"
-Requires-Dist: torch; extra == "all"
-Requires-Dist: tqdm; extra == "all"
-Requires-Dist: transformers>=4.33; extra == "all"
-Requires-Dist: word2number; extra == "all"
 Requires-Dist: ms-opencompass>=0.1.6; extra == "all"
 Requires-Dist: ms-vlmeval>=0.0.17; extra == "all"
 Requires-Dist: langchain<0.4.0,>=0.3.0; extra == "all"
@@ -100,11 +103,14 @@ Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "all"
 Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "all"
 Requires-Dist: mteb==1.38.20; extra == "all"
 Requires-Dist: ragas==0.2.14; extra == "all"
+Requires-Dist: torch; extra == "all"
 Requires-Dist: webdataset>0.2.0; extra == "all"
 Requires-Dist: aiohttp; extra == "all"
 Requires-Dist: fastapi; extra == "all"
+Requires-Dist: jinja2; extra == "all"
 Requires-Dist: numpy; extra == "all"
-Requires-Dist:
+Requires-Dist: rich; extra == "all"
+Requires-Dist: sse_starlette; extra == "all"
 Requires-Dist: transformers; extra == "all"
 Requires-Dist: uvicorn; extra == "all"
 Requires-Dist: gradio==5.4.0; extra == "all"
@@ -112,51 +118,12 @@ Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "all"
 Requires-Dist: diffusers; extra == "all"
 Requires-Dist: iopath; extra == "all"
 Requires-Dist: omegaconf; extra == "all"
-Requires-Dist:
+Requires-Dist: open_clip_torch; extra == "all"
 Requires-Dist: opencv-python; extra == "all"
 Requires-Dist: peft>=0.17; extra == "all"
+Requires-Dist: torch; extra == "all"
 Requires-Dist: torchvision; extra == "all"
-
-Requires-Dist: human-eval; extra == "all"
-Requires-Dist: pytest; extra == "all"
-Requires-Dist: pytest-cov; extra == "all"
-Requires-Dist: python-dotenv; extra == "all"
-Provides-Extra: app
-Requires-Dist: gradio==5.4.0; extra == "app"
-Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "app"
-Provides-Extra: dev
-Requires-Dist: bfcl-eval==2025.6.16; extra == "dev"
-Requires-Dist: human-eval; extra == "dev"
-Requires-Dist: pytest; extra == "dev"
-Requires-Dist: pytest-cov; extra == "dev"
-Requires-Dist: python-dotenv; extra == "dev"
-Provides-Extra: docs
-Requires-Dist: docutils>=0.16.0; extra == "docs"
-Requires-Dist: myst-parser; extra == "docs"
-Requires-Dist: recommonmark; extra == "docs"
-Requires-Dist: sphinx>=5.3.0; extra == "docs"
-Requires-Dist: sphinx-design; extra == "docs"
-Requires-Dist: sphinxawesome-theme; extra == "docs"
-Provides-Extra: opencompass
-Requires-Dist: ms-opencompass>=0.1.6; extra == "opencompass"
-Provides-Extra: perf
-Requires-Dist: aiohttp; extra == "perf"
-Requires-Dist: fastapi; extra == "perf"
-Requires-Dist: numpy; extra == "perf"
-Requires-Dist: rich; extra == "perf"
-Requires-Dist: sse-starlette; extra == "perf"
-Requires-Dist: transformers; extra == "perf"
-Requires-Dist: uvicorn; extra == "perf"
-Provides-Extra: rag
-Requires-Dist: langchain<0.4.0,>=0.3.0; extra == "rag"
-Requires-Dist: langchain-community<0.4.0,>=0.3.0; extra == "rag"
-Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "rag"
-Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "rag"
-Requires-Dist: mteb==1.38.20; extra == "rag"
-Requires-Dist: ragas==0.2.14; extra == "rag"
-Requires-Dist: webdataset>0.2.0; extra == "rag"
-Provides-Extra: vlmeval
-Requires-Dist: ms-vlmeval>=0.0.17; extra == "vlmeval"
+Dynamic: license-file
 
 <p align="center">
 <br>
@@ -164,13 +131,12 @@
 <br>
 <p>
 
-
 <p align="center">
 <a href="README_zh.md">中文</a>   ｜   English
 </p>
 
 <p align="center">
-<img src="https://img.shields.io/badge/python-%E2%89%A53.
+<img src="https://img.shields.io/badge/python-%E2%89%A53.10-5be.svg">
 <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
 <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope"></a>
 <a href="https://github.com/modelscope/evalscope/pulls"><img src="https://img.shields.io/badge/PR-welcome-55EB99.svg"></a>
@@ -178,123 +144,86 @@
 <p>
 
 <p align="center">
-<a href="https://evalscope.readthedocs.io/zh-cn/latest/"> 📖
+<a href="https://evalscope.readthedocs.io/zh-cn/latest/"> 📖 Chinese Documentation</a>   |   <a href="https://evalscope.readthedocs.io/en/latest/"> 📖 English Documentation</a>
 <p>
 
-> ⭐ If you like this project, please click the "Star" button at the top right to support us. Your support is our motivation to keep going!
-
-## 📋 Contents
-- [📋 Contents](#-contents)
-- [📝 Introduction](#-introduction)
-- [☎ User Groups](#-user-groups)
-- [🎉 News](#-news)
-- [🛠️ Environment Setup](#️-environment-setup)
-- [Method 1. Install via pip](#method-1-install-via-pip)
-- [Method 2. Install from source](#method-2-install-from-source)
-- [🚀 Quick Start](#-quick-start)
-- [Method 1. Using Command Line](#method-1-using-command-line)
-- [Method 2. Using Python Code](#method-2-using-python-code)
-- [Basic Parameter](#basic-parameter)
-- [Output Results](#output-results)
-- [📈 Visualization of Evaluation Results](#-visualization-of-evaluation-results)
-- [🌐 Evaluation of Model API](#-evaluation-of-model-api)
-- [⚙️ Custom Parameter Evaluation](#️-custom-parameter-evaluation)
-- [Parameter Description](#parameter-description)
-- [🧪 Other Evaluation Backends](#-other-evaluation-backends)
-- [📈 Model Serving Performance Evaluation](#-model-serving-performance-evaluation)
-- [🖊️ Custom Dataset Evaluation](#️-custom-dataset-evaluation)
-- [⚔️ Arena Mode](#️-arena-mode)
-- [👷♂️ Contribution](#️-contribution)
-- [📚 Citation](#-citation)
-- [🔜 Roadmap](#-roadmap)
-- [⭐ Star History](#-star-history)
 
+> ⭐ If you like this project, please click the "Star" button in the upper right corner to support us. Your support is our motivation to move forward!
 
 ## 📝 Introduction
 
-EvalScope is a
+EvalScope is a powerful and easily extensible model evaluation framework created by the [ModelScope Community](https://modelscope.cn/), aiming to provide a one-stop evaluation solution for large model developers.
 
--
-- 🎨 Multimodal Models
-- 🔍 Embedding Models
-- 🏆 Reranker Models
-- 🖼️ CLIP Models
-- 🎭 AIGC Models (Image-to-Text/Video)
-- ...and more!
+Whether you want to evaluate the general capabilities of models, conduct multi-model performance comparisons, or need to stress test models, EvalScope can meet your needs.
 
-
+## ✨ Key Features
 
--
--
--
+- **📚 Comprehensive Evaluation Benchmarks**: Built-in multiple industry-recognized evaluation benchmarks including MMLU, C-Eval, GSM8K, and more.
+- **🧩 Multi-modal and Multi-domain Support**: Supports evaluation of various model types including Large Language Models (LLM), Vision Language Models (VLM), Embedding, Reranker, AIGC, and more.
+- **🚀 Multi-backend Integration**: Seamlessly integrates multiple evaluation backends including OpenCompass, VLMEvalKit, RAGEval to meet different evaluation needs.
+- **⚡ Inference Performance Testing**: Provides powerful model service stress testing tools, supporting multiple performance metrics such as TTFT, TPOT.
+- **📊 Interactive Reports**: Provides WebUI visualization interface, supporting multi-dimensional model comparison, report overview and detailed inspection.
+- **⚔️ Arena Mode**: Supports multi-model battles (Pairwise Battle), intuitively ranking and evaluating models.
+- **🔧 Highly Extensible**: Developers can easily add custom datasets, models and evaluation metrics.
 
-
+<details><summary>🏛️ Overall Architecture</summary>
 
 <p align="center">
-
-
+  <img src="https://sail-moe.oss-cn-hangzhou.aliyuncs.com/yunlin/images/evalscope/doc/EvalScope%E6%9E%B6%E6%9E%84%E5%9B%BE.png" style="width: 70%;">
+  <br>EvalScope Overall Architecture.
 </p>
 
-
-
-
-1. Input Layer
-- **Model Sources**: API models (OpenAI API), local models (ModelScope)
-- **Datasets**: Standard evaluation benchmarks (MMLU/GSM8k, etc.), custom data (MCQ/QA)
-
-2. Core Functions
-- **Multi-backend Evaluation**
-- Native backends: Unified evaluation for LLM/VLM/Embedding/T2I models
-- Integrated frameworks: OpenCompass/MTEB/VLMEvalKit/RAGAS
+1. **Input Layer**
+- **Model Sources**: API models (OpenAI API), Local models (ModelScope)
+- **Datasets**: Standard evaluation benchmarks (MMLU/GSM8k etc.), Custom data (MCQ/QA)
 
-
-
-
-
+2. **Core Functions**
+- **Multi-backend Evaluation**: Native backend, OpenCompass, MTEB, VLMEvalKit, RAGAS
+- **Performance Monitoring**: Supports multiple model service APIs and data formats, tracking TTFT/TPOP and other metrics
+- **Tool Extensions**: Integrates Tool-Bench, Needle-in-a-Haystack, etc.
 
-
-
-
-3. Output Layer
-- **Structured Reports**: Supports JSON/Tables/Logs
-- **Visualization Platforms**: Supports Gradio/Wandb/SwanLab
+3. **Output Layer**
+- **Structured Reports**: Supports JSON, Table, Logs
+- **Visualization Platform**: Supports Gradio, Wandb, SwanLab
 
 </details>
 
-##
-
-Please scan the QR code below to join our community groups:
-
-[Discord Group](https://discord.com/invite/D27yfEFVz5) | WeChat Group | DingTalk Group
-:-------------------------:|:-------------------------:|:-------------------------:
-<img src="docs/asset/discord_qr.jpg" width="160" height="160"> | <img src="docs/asset/wechat.png" width="160" height="160"> | <img src="docs/asset/dingding.png" width="160" height="160">
-
-
-## 🎉 News
+## 🎉 What's New
 
 > [!IMPORTANT]
 > **Version 1.0 Refactoring**
 >
 > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
 
-- 🔥 **[2025.
+- 🔥 **[2025.11.07]** Added support for τ²-bench, an extended and enhanced version of τ-bench that includes a series of code fixes and adds telecom domain troubleshooting scenarios. Refer to the [usage documentation](https://evalscope.readthedocs.io/en/latest/third_party/tau2_bench.html).
+- 🔥 **[2025.10.30]** Added support for BFCL-v4, enabling evaluation of agent capabilities including web search and long-term memory. See the [usage documentation](https://evalscope.readthedocs.io/en/latest/third_party/bfcl_v4.html).
+- 🔥 **[2025.10.27]** Added support for LogiQA, HaluEval, MathQA, MRI-QA, PIQA, QASC, CommonsenseQA and other evaluation benchmarks. Thanks to @[penguinwang96825](https://github.com/penguinwang96825) for the code implementation.
+- 🔥 **[2025.10.26]** Added support for Conll-2003, CrossNER, Copious, GeniaNER, HarveyNER, MIT-Movie-Trivia, MIT-Restaurant, OntoNotes5, WNUT2017 and other Named Entity Recognition evaluation benchmarks. Thanks to @[penguinwang96825](https://github.com/penguinwang96825) for the code implementation.
+- 🔥 **[2025.10.21]** Optimized sandbox environment usage in code evaluation, supporting both local and remote operation modes. For details, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/sandbox.html).
+- 🔥 **[2025.10.20]** Added support for evaluation benchmarks including PolyMath, SimpleVQA, MathVerse, MathVision, AA-LCR; optimized evalscope perf performance to align with vLLM Bench. For details, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/vs_vllm_bench.html).
+- 🔥 **[2025.10.14]** Added support for OCRBench, OCRBench-v2, DocVQA, InfoVQA, ChartQA, and BLINK multimodal image-text evaluation benchmarks.
+- 🔥 **[2025.09.22]** Code evaluation benchmarks (HumanEval, LiveCodeBench) now support running in a sandbox environment. To use this feature, please install [ms-enclave](https://github.com/modelscope/ms-enclave) first.
+- 🔥 **[2025.09.19]** Added support for multimodal image-text evaluation benchmarks including RealWorldQA, AI2D, MMStar, MMBench, and OmniBench, as well as pure text evaluation benchmarks such as Multi-IF, HealthBench, and AMC.
+- 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/vlm.html).
+- 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
+- 🔥 **[2025.08.22]** Version 1.0 Refactoring. Break changes, please [refer to](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
 - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
-- 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/
+- 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#bench).
 - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
 - 🔥 **[2025.07.03]** Refactored Arena Mode: now supports custom model battles, outputs a model leaderboard, and provides battle result visualization. See [reference](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html) for details.
+<details><summary>More</summary>
+
 - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
-- 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/
+- 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/bfcl_v3.html).
 - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
-- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
+- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
 - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
 - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
-<details><summary>More</summary>
-
 - 🔥 **[2025.04.29]** Added Qwen3 Evaluation Best Practices, [welcome to read 📖](https://evalscope.readthedocs.io/en/latest/best_practice/qwen3.html)
 - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
 - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
 - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
-- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
 - 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
 - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
 - 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
@@ -326,96 +255,71 @@ Please scan the QR code below to join our community groups:
 
 </details>
 
-##
+## ❤️ Community & Support
 
-
+Welcome to join our community to communicate with other developers and get help.
 
-
+[Discord Group](https://discord.com/invite/D27yfEFVz5) | WeChat Group | DingTalk Group
+:-------------------------:|:-------------------------:|:-------------------------:
+<img src="docs/asset/discord_qr.jpg" width="160" height="160"> | <img src="docs/asset/wechat.png" width="160" height="160"> | <img src="docs/asset/dingding.png" width="160" height="160">
 
-1. Create a conda environment (optional)
-```shell
-# Python 3.10 is recommended
-conda create -n evalscope python=3.10
 
-
-
-
-
-
-
-```
-3. Install additional dependencies (optional)
-- To use model service inference benchmarking features, install the perf dependency:
+
+## 🛠️ Environment Setup
+
+We recommend using `conda` to create a virtual environment and install with `pip`.
+
+1. **Create and Activate Conda Environment** (Python 3.10 recommended)
 ```shell
-
+conda create -n evalscope python=3.10
+conda activate evalscope
 ```
-
+
+2. **Install EvalScope**
+
+- **Method 1: Install via PyPI (Recommended)**
+```shell
+pip install evalscope
+```
+
+- **Method 2: Install from Source (For Development)**
+```shell
+git clone https://github.com/modelscope/evalscope.git
+cd evalscope
+pip install -e .
+```
+
+3. **Install Additional Dependencies** (Optional)
+Install corresponding feature extensions according to your needs:
 ```shell
+# Performance testing
+pip install 'evalscope[perf]'
+
+# Visualization App
 pip install 'evalscope[app]'
-
-
-```shell
+
+# Other evaluation backends
 pip install 'evalscope[opencompass]'
 pip install 'evalscope[vlmeval]'
 pip install 'evalscope[rag]'
-
-
-```shell
+
+# Install all dependencies
 pip install 'evalscope[all]'
 ```
+> If you installed from source, please replace `evalscope` with `.`, for example `pip install '.[perf]'`.
 
 > [!NOTE]
->
->```shell
->pip install llmuses<=0.4.3
->```
->Then, import related dependencies using `llmuses`:
->```python
->from llmuses import ...
->```
-
-### Method 2. Install from source
-
-Installing from source allows you to use the latest code and makes it easier for further development and debugging.
-
-1. Clone the source code
-```shell
-git clone https://github.com/modelscope/evalscope.git
-```
-2. Install dependencies
-```shell
-cd evalscope/
-
-pip install -e .
-```
-3. Install additional dependencies
-- To use model service inference benchmarking features, install the perf dependency:
-```shell
-pip install '.[perf]'
-```
-- To use visualization features, install the app dependency:
-```shell
-pip install '.[app]'
-```
-- If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
-```shell
-pip install '.[opencompass]'
-pip install '.[vlmeval]'
-pip install '.[rag]'
-```
-- To install all dependencies:
-```shell
-pip install '.[all]'
-```
+> This project was formerly known as `llmuses`. If you need to use `v0.4.3` or earlier versions, please run `pip install llmuses<=0.4.3` and use `from llmuses import ...` for imports.
 
 
 ## 🚀 Quick Start
 
-
+You can start evaluation tasks in two ways: **command line** or **Python code**.
 
 ### Method 1. Using Command Line
 
-Execute the `eval` command in any
+Execute the `evalscope eval` command in any path to start evaluation. The following command will evaluate the `Qwen/Qwen2.5-0.5B-Instruct` model on `gsm8k` and `arc` datasets, taking only 5 samples from each dataset.
+
 ```bash
 evalscope eval \
  --model Qwen/Qwen2.5-0.5B-Instruct \
@@ -425,22 +329,23 @@ evalscope eval \
 
 ### Method 2. Using Python Code
 
-
-
-**Using `TaskConfig`**
+Use the `run_task` function and `TaskConfig` object to configure and start evaluation tasks.
 
 ```python
 from evalscope import run_task, TaskConfig
 
+# Configure evaluation task
 task_cfg = TaskConfig(
     model='Qwen/Qwen2.5-0.5B-Instruct',
     datasets=['gsm8k', 'arc'],
     limit=5
 )
 
-
+# Start evaluation
+run_task(task_cfg)
 ```
-
+
+<details><summary><b>💡 Tip:</b> `run_task` also supports dictionaries, YAML or JSON files as configuration.</summary>
 
 **Using Python Dictionary**
 
@@ -452,13 +357,10 @@ task_cfg = {
     'datasets': ['gsm8k', 'arc'],
     'limit': 5
 }
-
 run_task(task_cfg=task_cfg)
 ```
 
-**Using `yaml`
-
-`config.yaml`:
+**Using YAML File** (`config.yaml`)
 ```yaml
 model: Qwen/Qwen2.5-0.5B-Instruct
 datasets:
@@ -466,37 +368,15 @@ datasets:
   - arc
 limit: 5
 ```
-
 ```python
 from evalscope.run import run_task
 
 run_task(task_cfg="config.yaml")
 ```
-
-**Using `json` file**
-
-`config.json`:
-```json
-{
-  "model": "Qwen/Qwen2.5-0.5B-Instruct",
-  "datasets": ["gsm8k", "arc"],
-  "limit": 5
-}
-```
-
-```python
-from evalscope.run import run_task
-
-run_task(task_cfg="config.json")
-```
 </details>
 
-### Basic Parameter
-- `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
-- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
-- `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
-
 ### Output Results
+After evaluation completion, you will see a report in the terminal in the following format:
 ```text
 +-----------------------+----------------+-----------------+-----------------+---------------+-------+---------+
 | Model Name | Dataset Name | Metric Name | Category Name | Subset Name | Num | Score |
@@ -509,164 +389,140 @@ run_task(task_cfg="config.json")
|
|
|
509
389
|
+-----------------------+----------------+-----------------+-----------------+---------------+-------+---------+
|
|
510
390
|
```
## 📈 Advanced Usage

### Custom Evaluation Parameters

You can fine-tune model loading, inference, and dataset configuration through command-line parameters.

```shell
evalscope eval \
  --model Qwen/Qwen3-0.6B \
  --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
  --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512}' \
  --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
  --datasets gsm8k \
  --limit 10
```

- `--model-args`: Model loading parameters, passed as a JSON string:
  - `revision`: Model revision
  - `precision`: Model precision
  - `device_map`: Device allocation for the model
- `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
  - `do_sample`: Whether to use sampling
  - `temperature`: Generation temperature
  - `max_tokens`: Maximum number of generated tokens
  - `chat_template_kwargs`: Model inference template parameters
- `--dataset-args`: Settings for the evaluation datasets, passed as a JSON string where each key is a dataset name and each value is that dataset's parameters; the keys must correspond one-to-one with the entries in `--datasets`:
  - `few_shot_num`: Number of few-shot examples
  - `few_shot_random`: Whether to sample few-shot data randomly; defaults to `true` if not set

For details, please refer to the [📖 Complete Parameter Guide](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
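
The same settings can be configured in Python. The snippet below is a minimal sketch mirroring the command above; it assumes `TaskConfig` accepts `model_args`, `generation_config`, and `dataset_args` as dictionaries matching their CLI counterparts:

```python
from evalscope import run_task, TaskConfig

# Sketch: assumes these TaskConfig fields take the same dictionaries
# as the corresponding CLI flags shown above.
task_cfg = TaskConfig(
    model='Qwen/Qwen3-0.6B',
    model_args={'revision': 'master', 'precision': 'torch.float16', 'device_map': 'auto'},
    generation_config={'do_sample': True, 'temperature': 0.6, 'max_tokens': 512},
    dataset_args={'gsm8k': {'few_shot_num': 0, 'few_shot_random': False}},
    datasets=['gsm8k'],
    limit=10,
)
run_task(task_cfg)
```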

### Evaluating Online Model APIs

EvalScope supports evaluating model services deployed via an API (for example, with [vLLM](https://github.com/vllm-project/vllm)); simply specify the service address and API key, and set `--eval-type` to `service`.

1. **Start the model service** (using vLLM as an example):
   ```shell
   export VLLM_USE_MODELSCOPE=True
   python -m vllm.entrypoints.openai.api_server \
     --model Qwen/Qwen2.5-0.5B-Instruct \
     --served-model-name qwen2.5 \
     --port 8801
   ```

2. **Run the evaluation**:
   ```shell
   evalscope eval \
     --model qwen2.5 \
     --eval-type service \
     --api-url http://127.0.0.1:8801/v1 \
     --api-key EMPTY \
     --datasets gsm8k \
     --limit 10
   ```
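
The same service evaluation can also be launched from Python. This is a minimal sketch; it assumes `TaskConfig` exposes `eval_type`, `api_url`, and `api_key` fields mirroring the CLI flags above:

```python
from evalscope import run_task, TaskConfig

# Sketch: assumed Python equivalents of the CLI flags above.
task_cfg = TaskConfig(
    model='qwen2.5',
    eval_type='service',                 # evaluate a deployed API service
    api_url='http://127.0.0.1:8801/v1',
    api_key='EMPTY',
    datasets=['gsm8k'],
    limit=10,
)
run_task(task_cfg)
```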

### ⚔️ Arena Mode

Arena mode evaluates models through pairwise battles and reports win rates and rankings, making it well suited to side-by-side comparison of multiple models.

```text
# Example evaluation results
Model         WinRate (%)   CI (%)
------------  ------------  ---------------
qwen2.5-72b   69.3          (-13.3 / +12.2)
qwen2.5-7b    50.0          (+0.0 / +0.0)
qwen2.5-0.5b  4.7           (-2.5 / +4.4)
```

For details, please refer to the [📖 Arena Mode Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html).

### 🖊️ Custom Dataset Evaluation

EvalScope lets you easily add and evaluate your own datasets; a sketch of what a custom dataset can look like follows below. For details, please refer to the [📖 Custom Dataset Evaluation Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/index.html).
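
For illustration, a custom question-answer dataset is typically a JSONL file with one sample per line; the field names below (`query`, `response`) are an assumption for this sketch, so check the guide above for the exact schema expected by each dataset format:

```json
{"query": "What is the capital of France?", "response": "Paris"}
{"query": "What is 2 + 3?", "response": "5"}
```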

## 🧪 Other Evaluation Backends

EvalScope supports launching evaluation tasks through third-party evaluation frameworks, which we call evaluation backends, to meet diverse evaluation needs; a launch sketch follows the list below:

- **Native**: EvalScope's default evaluation framework, supporting single-model evaluation, arena mode, and baseline-comparison mode.
- **[OpenCompass](https://github.com/open-compass/opencompass)**: Focuses on text-only evaluation. [📖 Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
- **[VLMEvalKit](https://github.com/open-compass/VLMEvalKit)**: Focuses on multimodal evaluation. [📖 Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
- **RAGEval**: Focuses on RAG evaluation, supporting independent evaluation of embedding and reranker models via [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation via [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html). [📖 Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
- **Third-party Evaluation Tools**: Supports evaluation tasks such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).
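
As a rough sketch of how a backend task is usually launched, the configuration selects a backend and passes it a backend-specific config; the `eval_backend` and `eval_config` keys and the inner schema are assumptions here, so consult the usage guide of the backend you choose:

```python
from evalscope.run import run_task

# Sketch: assumed keys for selecting and configuring a backend.
task_cfg = {
    'eval_backend': 'OpenCompass',   # which backend to use (assumed key)
    'eval_config': {                 # backend-specific config (assumed key)
        'datasets': ['gsm8k'],
        'models': [{'path': 'Qwen/Qwen2.5-0.5B-Instruct'}],
    },
}
run_task(task_cfg=task_cfg)
```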

## ⚡ Inference Performance Evaluation Tool

EvalScope provides a powerful stress-testing tool for evaluating the inference performance of large language model services.

- **Key metrics**: Supports throughput (tokens/s), time to first token (TTFT), time per output token (TPOT), and more.
- **Result recording**: Supports recording results to `wandb` and `swanlab`.
- **Speed benchmarks**: Can generate speed benchmark results similar to official reports, for example:

```text
Speed Benchmark Results:
+---------------+-----------------+----------------+
| Prompt Tokens | Speed(tokens/s) | GPU Memory(GB) |
+---------------+-----------------+----------------+
| 1             | 50.69           | 0.97           |
| 6144          | 51.36           | 1.23           |
| 14336         | 49.93           | 1.59           |
| 30720         | 49.56           | 2.34           |
+---------------+-----------------+----------------+
```

For details, please refer to the [📖 Performance Testing Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/index.html).
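
A typical invocation looks like the sketch below; the exact flag set is an assumption based on the `evalscope perf` entry point, so verify it against the guide above:

```shell
evalscope perf \
  --url "http://127.0.0.1:8801/v1/chat/completions" \
  --model qwen2.5 \
  --api openai \
  --parallel 5 \
  --number 20
```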

Example output is shown below:

<p align="center">
  <img src="docs/en/user_guides/stress_test/images/multi_perf.png" style="width: 80%;">
</p>

## 📊 Visualizing Evaluation Results

EvalScope provides a Gradio-based WebUI for interactive analysis and comparison of evaluation results.

1. **Install dependencies** (including `gradio`, `plotly`, etc.)
   ```bash
   pip install 'evalscope[app]'
   ```

2. **Start the service**
   ```bash
   evalscope app
   ```
   Then visit `http://127.0.0.1:7861` in your browser to open the visualization interface.

<table>
  <tr>
    <td style="text-align: center;">
      <img src="docs/en/get_started/images/setting.png" alt="Setting" style="width: 85%;" />
      <p>Settings Interface</p>
    </td>
    <td style="text-align: center;">
      <img src="docs/en/get_started/images/model_compare.png" alt="Model Compare" style="width: 100%;" />
      <p>Model Comparison</p>
    </td>
  </tr>
  <tr>
    <td style="text-align: center;">
      <img src="docs/en/get_started/images/report_overview.png" alt="Report Overview" style="width: 100%;" />
      <p>Report Overview</p>
    </td>
    <td style="text-align: center;">
      <img src="docs/en/get_started/images/report_details.png" alt="Report Details" style="width: 85%;" />
      <p>Report Details</p>
    </td>
  </tr>
</table>

For details, please refer to [📖 Visualizing Evaluation Results](https://evalscope.readthedocs.io/en/latest/get_started/visualization.html).

## 👷♂️ Contributing

We welcome contributions from the community! If you want to add new evaluation benchmarks, models, or features, please refer to our [Contributing Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/add_benchmark.html).

Thanks to all the developers who have contributed to EvalScope!

<a href="https://github.com/modelscope/evalscope/graphs/contributors" target="_blank">
<table>
  <!-- Contributor avatars -->
</table>
</a>

## 📚 Citation

If you use EvalScope in your research, please cite our work:

```bibtex
@misc{evalscope_2024,
    title={{EvalScope}: Evaluation Framework for Large Models},
    ...
}
```

## 🔜 Roadmap

- [x] Better visualization of evaluation reports
- [x] Mixed evaluation across multiple datasets
- [x] RAG evaluation
- [x] VLM evaluation
- [x] Agent evaluation
- [x] vLLM support
- [ ] Distributed evaluation
- [x] Multi-modal evaluation
- [ ] Benchmarks
  - [x] BFCL-v3
  - [x] GPQA
  - [x] MBPP

## ⭐ Star History