evalscope 0.5.5rc1__tar.gz → 0.6.0rc0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/PKG-INFO +40 -44
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/README.md +22 -33
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/opencompass/tasks/eval_datasets.py +1 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/__init__.py +4 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/backend_manager.py +80 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/clip_benchmark/__init__.py +2 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/clip_benchmark/arguments.py +34 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +277 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/clip_benchmark/task_template.py +119 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +83 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +247 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +170 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/__init__.py +4 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/arguments.py +61 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/base.py +91 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/task_template.py +85 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +302 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +252 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +61 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +113 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +150 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +345 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/tasks/STS.py +302 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +70 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/ragas/__init__.py +2 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/ragas/arguments.py +47 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/ragas/metrics/__init__.py +2 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/ragas/metrics/multi_modal_faithfulness.py +91 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/ragas/metrics/multi_modal_relevance.py +99 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/ragas/task_template.py +61 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/ragas/tasks/__init__.py +2 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +263 -0
- evalscope-0.6.0rc0/evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +72 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/vlm_eval_kit/backend_manager.py +0 -1
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/vlm_eval_kit/custom_dataset.py +1 -1
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/evaluator/evaluator.py +1 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/api/openai_api.py +2 -2
- evalscope-0.6.0rc0/evalscope/perf/datasets/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/http_client.py +1 -1
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/openai_api.py +2 -0
- evalscope-0.6.0rc0/evalscope/preprocess/tokenizers/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/run.py +4 -0
- evalscope-0.6.0rc0/evalscope/utils/logger.py +94 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/utils/task_utils.py +3 -0
- evalscope-0.6.0rc0/evalscope/version.py +4 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope.egg-info/PKG-INFO +40 -44
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope.egg-info/SOURCES.txt +31 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope.egg-info/requires.txt +18 -10
- evalscope-0.5.5rc1/evalscope/backend/opencompass/__init__.py +0 -3
- evalscope-0.5.5rc1/evalscope/utils/logger.py +0 -64
- evalscope-0.5.5rc1/evalscope/version.py +0 -4
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/__init__.py +0 -0
- {evalscope-0.5.5rc1/evalscope/perf → evalscope-0.6.0rc0/evalscope/backend}/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/base.py +0 -0
- {evalscope-0.5.5rc1/evalscope/backend → evalscope-0.6.0rc0/evalscope/backend/opencompass}/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/opencompass/api_meta_template.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/opencompass/backend_manager.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
- {evalscope-0.5.5rc1/evalscope/perf/datasets → evalscope-0.6.0rc0/evalscope/backend/rag_eval/clip_benchmark/tasks}/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/arc/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/arc/ai2_arc.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/arc/arc_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/bbh_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/benchmark.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/ceval/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/ceval/ceval_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/ceval/ceval_exam.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/cmmlu/cmmlu.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/competition_math/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/competition_math/competition_math.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/data_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/general_qa/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/gsm8k/gsm8k.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/hellaswag/hellaswag.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/humaneval/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/humaneval/humaneval.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/mmlu/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/mmlu/mmlu.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/race/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/race/race.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/race/race_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/cache.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/cli/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/cli/base.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/cli/cli.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/cli/start_perf.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/cli/start_server.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/config.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/constants.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/evaluator/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/evaluator/rating_eval.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/evaluator/reviewer/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/evaluator/reviewer/auto_reviewer.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/metrics/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/metrics/code_metric.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/metrics/math_accuracy.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/metrics/metrics.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/metrics/rouge_metric.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/api/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/custom/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/custom/custom_model.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/dummy_chat_model.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/model.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/model_adapter.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/openai_model.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/models/template.py +0 -0
- {evalscope-0.5.5rc1/evalscope/preprocess/tokenizers → evalscope-0.6.0rc0/evalscope/perf}/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/_logging.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/api_plugin_base.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/custom_api.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/dashscope_api.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/dataset_plugin_base.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/datasets/line_by_line.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/datasets/longalpaca_12k.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/datasets/openqa.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/how_to_analysis_result.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/plugin_registry.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/query_parameters.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/perf/server_sent_event.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/preprocess/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/preprocess/tokenizers/gpt2_tokenizer.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/arc.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/bbh.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/bbh_mini.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/ceval.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/ceval_mini.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/cmmlu.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/general_qa.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/gsm8k.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/mmlu.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/registry/tasks/mmlu_mini.yaml +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/run_arena.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/run_ms.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/summarizer.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/eval.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/infer.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/resources/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/tools/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/longbench_write/utils.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/toolbench_static/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/toolbench_static/eval.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/toolbench_static/infer.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/toolbench_static/llm/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/tools/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/tools/combine_reports.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/tools/gen_mmlu_subject_mapping.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/tools/rewrite_eval_results.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/utils/__init__.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/utils/arena_utils.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/utils/completion_parsers.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/utils/task_cfg_parser.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/utils/utils.py +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope.egg-info/dependency_links.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope.egg-info/entry_points.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope.egg-info/not-zip-safe +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope.egg-info/top_level.txt +0 -0
- {evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/setup.cfg +0 -0
{evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: evalscope
-Version: 0.5.5rc1
+Version: 0.6.0rc0
 Summary: EvalScope: Lightweight LLMs Evaluation Framework
 Home-page: https://github.com/modelscope/evalscope
 Author: ModelScope team
@@ -19,22 +19,22 @@ Requires-Dist: torch
 Requires-Dist: absl-py
 Requires-Dist: accelerate
 Requires-Dist: cachetools
-Requires-Dist: datasets
+Requires-Dist: datasets<=3.0.1,>=3.0.0
 Requires-Dist: editdistance
 Requires-Dist: jsonlines
 Requires-Dist: matplotlib
 Requires-Dist: modelscope[framework]
-Requires-Dist: nltk
+Requires-Dist: nltk>=3.9
 Requires-Dist: openai
 Requires-Dist: pandas
 Requires-Dist: plotly
-Requires-Dist: pyarrow
+Requires-Dist: pyarrow<=17.0.0
 Requires-Dist: pympler
 Requires-Dist: pyyaml
 Requires-Dist: regex
 Requires-Dist: requests
 Requires-Dist: requests-toolbelt
-Requires-Dist: rouge-score
+Requires-Dist: rouge-score>=0.1.0
 Requires-Dist: sacrebleu
 Requires-Dist: scikit-learn
 Requires-Dist: seaborn
@@ -48,9 +48,13 @@ Requires-Dist: transformers_stream_generator
 Requires-Dist: jieba
 Requires-Dist: rouge-chinese
 Provides-Extra: opencompass
-Requires-Dist: ms-opencompass>=0.1.
+Requires-Dist: ms-opencompass>=0.1.3; extra == "opencompass"
 Provides-Extra: vlmeval
 Requires-Dist: ms-vlmeval>=0.0.5; extra == "vlmeval"
+Provides-Extra: rag
+Requires-Dist: mteb==1.19.4; extra == "rag"
+Requires-Dist: ragas==0.2.3; extra == "rag"
+Requires-Dist: webdataset>0.2.0; extra == "rag"
 Provides-Extra: inner
 Requires-Dist: absl-py; extra == "inner"
 Requires-Dist: accelerate; extra == "inner"
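The new `rag` extra introduced here groups the RAG-evaluation dependencies (mteb, ragas, webdataset) behind an optional install target; under standard pip extras syntax it would be pulled in via `pip install evalscope[rag]`, and the same pins are also folded into the `all` extra in the hunks below.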
@@ -83,22 +87,22 @@ Requires-Dist: torch; extra == "all"
 Requires-Dist: absl-py; extra == "all"
 Requires-Dist: accelerate; extra == "all"
 Requires-Dist: cachetools; extra == "all"
-Requires-Dist: datasets; extra == "all"
+Requires-Dist: datasets<=3.0.1,>=3.0.0; extra == "all"
 Requires-Dist: editdistance; extra == "all"
 Requires-Dist: jsonlines; extra == "all"
 Requires-Dist: matplotlib; extra == "all"
 Requires-Dist: modelscope[framework]; extra == "all"
-Requires-Dist: nltk; extra == "all"
+Requires-Dist: nltk>=3.9; extra == "all"
 Requires-Dist: openai; extra == "all"
 Requires-Dist: pandas; extra == "all"
 Requires-Dist: plotly; extra == "all"
-Requires-Dist: pyarrow; extra == "all"
+Requires-Dist: pyarrow<=17.0.0; extra == "all"
 Requires-Dist: pympler; extra == "all"
 Requires-Dist: pyyaml; extra == "all"
 Requires-Dist: regex; extra == "all"
 Requires-Dist: requests; extra == "all"
 Requires-Dist: requests-toolbelt; extra == "all"
-Requires-Dist: rouge-score; extra == "all"
+Requires-Dist: rouge-score>=0.1.0; extra == "all"
 Requires-Dist: sacrebleu; extra == "all"
 Requires-Dist: scikit-learn; extra == "all"
 Requires-Dist: seaborn; extra == "all"
@@ -111,14 +115,20 @@ Requires-Dist: transformers>=4.33; extra == "all"
 Requires-Dist: transformers_stream_generator; extra == "all"
 Requires-Dist: jieba; extra == "all"
 Requires-Dist: rouge-chinese; extra == "all"
-Requires-Dist: ms-opencompass>=0.1.
+Requires-Dist: ms-opencompass>=0.1.3; extra == "all"
 Requires-Dist: ms-vlmeval>=0.0.5; extra == "all"
+Requires-Dist: mteb==1.19.4; extra == "all"
+Requires-Dist: ragas==0.2.3; extra == "all"
+Requires-Dist: webdataset>0.2.0; extra == "all"
 
-English | [简体中文](README_zh.md)
 
 
 
 
+<p align="center">
+English | <a href="README_zh.md">简体中文</a>
+</p>
+
 <p align="center">
 <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
 <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope">
@@ -127,7 +137,7 @@ English | [简体中文](README_zh.md)
 <img src='https://readthedocs.org/projects/evalscope-en/badge/?version=latest' alt='Documentation Status' />
 </a>
 <br>
-<a href="https://evalscope.readthedocs.io/en/latest/"
+<a href="https://evalscope.readthedocs.io/en/latest/">📖 Documents</a>
 <p>
 
 
@@ -141,34 +151,15 @@ English | [简体中文](README_zh.md)
 - [Offline Evaluation](#offline-evaluation)
 - [Arena Mode](#arena-mode)
 - [Model Serving Performance Evaluation](#Model-Serving-Performance-Evaluation)
-- [Leaderboard](#leaderboard)
-
-## 📝 Introduction
-
-Large Model (including Large Language Models, Multi-modal Large Language Models) evaluation has become a critical process for assessing and improving LLMs. To better support the evaluation of large models, we propose the EvalScope framework.
 
-### Framework Features
-- **Benchmark Datasets**: Preloaded with several commonly used test benchmarks, including MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, HumanEval, etc.
-- **Evaluation Metrics**: Implements various commonly used evaluation metrics.
-- **Model Access**: A unified model access mechanism that is compatible with the Generate and Chat interfaces of multiple model families.
-- **Automated Evaluation**: Includes automatic evaluation of objective questions and complex task evaluation using expert models.
-- **Evaluation Reports**: Automatically generates evaluation reports.
-- **Arena Mode**: Used for comparisons between models and objective evaluation of models, supporting various evaluation modes, including:
-  - **Single mode**: Scoring a single model.
-  - **Pairwise-baseline mode**: Comparing against a baseline model.
-  - **Pairwise (all) mode**: Pairwise comparison among all models.
-- **Visualization Tools**: Provides intuitive displays of evaluation results.
-- **Model Performance Evaluation**: Offers a performance testing tool for model inference services and detailed statistics, see [Model Performance Evaluation Documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html).
-- **OpenCompass Integration**: Supports OpenCompass as the evaluation backend, providing advanced encapsulation and task simplification, allowing for easier task submission for evaluation.
-- **VLMEvalKit Integration**: Supports VLMEvalKit as the evaluation backend, facilitating the initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
-- **Full-Link Support**: Through seamless integration with the [ms-swift](https://github.com/modelscope/ms-swift) training framework, provides a one-stop development process for model training, model deployment, model evaluation, and report viewing, enhancing user development efficiency.
 
+## 📝 Introduction
 
-
+EvalScope is the official model evaluation and performance benchmarking framework launched by the [ModelScope](https://modelscope.cn/) community. It comes with built-in common benchmarks and evaluation metrics, such as MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, and HumanEval. EvalScope supports various types of model evaluations, including LLMs, multimodal LLMs, embedding models, and reranker models. It is also applicable to multiple evaluation scenarios, such as end-to-end RAG evaluation, arena mode, and model inference performance stress testing. Moreover, with the seamless integration of the ms-swift training framework, evaluations can be initiated with a single click, providing full end-to-end support from model training to evaluation 🚀
 
 <p align="center">
 <img src="docs/en/_static/images/evalscope_framework.png" width="70%">
-<br>
+<br>EvalScope Framework.
 </p>
 
 The architecture includes the following modules:
@@ -178,14 +169,17 @@ The architecture includes the following modules:
 - **Native**: EvalScope’s own **default evaluation framework**, supporting various evaluation modes, including single model evaluation, arena mode, baseline model comparison mode, etc.
 - **OpenCompass**: Supports [OpenCompass](https://github.com/open-compass/opencompass) as the evaluation backend, providing advanced encapsulation and task simplification, allowing you to submit tasks for evaluation more easily.
 - **VLMEvalKit**: Supports [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) as the evaluation backend, enabling easy initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
+- **RAGEval**: Supports RAG evaluation, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
 - **ThirdParty**: Other third-party evaluation tasks, such as ToolBench.
 4. **Performance Evaluator**: Model performance evaluation, responsible for measuring model inference service performance, including performance testing, stress testing, performance report generation, and visualization.
 5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
 6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.
-</details>
 
 
 ## 🎉 News
+- 🔥 **[2024.10.31]** The best practice for evaluating Multimodal-RAG has been updated, please check the [📖 Blog](https://evalscope.readthedocs.io/zh-cn/latest/blog/RAG/multimodal_RAG.html#multimodal-rag) for more details.
+- 🔥 **[2024.10.23]** Supports multimodal RAG evaluation, including the assessment of image-text retrieval using [CLIP_Benchmark](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/clip_benchmark.html), and extends [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html) to support end-to-end multimodal metrics evaluation.
+- 🔥 **[2024.10.8]** Support for RAG evaluation, including independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
 - 🔥 **[2024.09.18]** Our documentation has been updated to include a blog module, featuring some technical research and discussions related to evaluations. We invite you to [📖 read it](https://evalscope.readthedocs.io/en/refact_readme/blog/index.html).
 - 🔥 **[2024.09.12]** Support for LongWriter evaluation, which supports 10,000+ word generation. You can use the benchmark [LongBench-Write](evalscope/third_party/longbench_write/README.md) to measure the long output quality as well as the output length.
 - 🔥 **[2024.08.30]** Support for custom dataset evaluations, including text datasets and multimodal image-text datasets.
@@ -356,9 +350,10 @@ run_task(task_cfg=your_task_cfg)
 ## Evaluation Backend
 EvalScope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
 - **Native**: EvalScope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
-- [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/opencompass_backend.html)
-- [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/vlmevalkit_backend.html)
-- **
+- [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
+- [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
+- **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
+- **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).
 
 ## Custom Dataset Evaluation
 EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset.html)
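The hunk above rewrites the Evaluation Backend list in the README, but the diff itself does not show how the new RAGEval backend is invoked. A rough sketch only, assuming the backend is selected through `run_task` with the same `eval_backend`/`eval_config` wrapper used for the other backends; the identifier string and the nested keys are assumptions, not taken verbatim from this diff (the inner `eval` fields mirror the clip_benchmark `Arguments` dataclass added later in this release):

from evalscope.run import run_task

# Hypothetical task config: "RAGEval" and the eval_config wrapper are assumed;
# the inner keys follow RAGEvalBackendManager.run() and clip_benchmark Arguments
# shown further down in this diff.
task_cfg = {
    "eval_backend": "RAGEval",
    "eval_config": {
        "tool": "clip_benchmark",
        "eval": {
            "models": [{"model_name": "<clip-model-id>"}],  # illustrative model entry
            "dataset_name": ["<retrieval-dataset>"],        # illustrative dataset
            "split": "test",
            "batch_size": 128,
        },
    },
}

run_task(task_cfg=task_cfg)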
@@ -380,13 +375,10 @@ A stress testing tool that focuses on large language models and can be customize
 Refer to : Model Serving Performance Evaluation [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html)
 
 
-## Leaderboard
-The LLM Leaderboard aims to provide an objective and comprehensive evaluation standard and platform to help researchers and developers understand and compare the performance of models on various tasks on ModelScope.
-
-Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
-
 
 ## TO-DO List
+- [x] RAG evaluation
+- [x] VLM evaluation
 - [x] Agents evaluation
 - [x] vLLM
 - [ ] Distributed evaluating
@@ -398,3 +390,7 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
 - [ ] Auto-reviewer
 - [ ] Qwen-max
 
+
+## Star History
+
+[](https://star-history.com/#modelscope/evalscope&Date)
{evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/README.md
RENAMED

@@ -1,8 +1,11 @@
-English | [简体中文](README_zh.md)
 
 
 
 
+<p align="center">
+English | <a href="README_zh.md">简体中文</a>
+</p>
+
 <p align="center">
 <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
 <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope">
@@ -11,7 +14,7 @@ English | [简体中文](README_zh.md)
 <img src='https://readthedocs.org/projects/evalscope-en/badge/?version=latest' alt='Documentation Status' />
 </a>
 <br>
-<a href="https://evalscope.readthedocs.io/en/latest/"
+<a href="https://evalscope.readthedocs.io/en/latest/">📖 Documents</a>
 <p>
 
 
@@ -25,34 +28,15 @@ English | [简体中文](README_zh.md)
 - [Offline Evaluation](#offline-evaluation)
 - [Arena Mode](#arena-mode)
 - [Model Serving Performance Evaluation](#Model-Serving-Performance-Evaluation)
-- [Leaderboard](#leaderboard)
-
-## 📝 Introduction
-
-Large Model (including Large Language Models, Multi-modal Large Language Models) evaluation has become a critical process for assessing and improving LLMs. To better support the evaluation of large models, we propose the EvalScope framework.
 
-### Framework Features
-- **Benchmark Datasets**: Preloaded with several commonly used test benchmarks, including MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, HumanEval, etc.
-- **Evaluation Metrics**: Implements various commonly used evaluation metrics.
-- **Model Access**: A unified model access mechanism that is compatible with the Generate and Chat interfaces of multiple model families.
-- **Automated Evaluation**: Includes automatic evaluation of objective questions and complex task evaluation using expert models.
-- **Evaluation Reports**: Automatically generates evaluation reports.
-- **Arena Mode**: Used for comparisons between models and objective evaluation of models, supporting various evaluation modes, including:
-  - **Single mode**: Scoring a single model.
-  - **Pairwise-baseline mode**: Comparing against a baseline model.
-  - **Pairwise (all) mode**: Pairwise comparison among all models.
-- **Visualization Tools**: Provides intuitive displays of evaluation results.
-- **Model Performance Evaluation**: Offers a performance testing tool for model inference services and detailed statistics, see [Model Performance Evaluation Documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html).
-- **OpenCompass Integration**: Supports OpenCompass as the evaluation backend, providing advanced encapsulation and task simplification, allowing for easier task submission for evaluation.
-- **VLMEvalKit Integration**: Supports VLMEvalKit as the evaluation backend, facilitating the initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
-- **Full-Link Support**: Through seamless integration with the [ms-swift](https://github.com/modelscope/ms-swift) training framework, provides a one-stop development process for model training, model deployment, model evaluation, and report viewing, enhancing user development efficiency.
 
+## 📝 Introduction
 
-
+EvalScope is the official model evaluation and performance benchmarking framework launched by the [ModelScope](https://modelscope.cn/) community. It comes with built-in common benchmarks and evaluation metrics, such as MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, and HumanEval. EvalScope supports various types of model evaluations, including LLMs, multimodal LLMs, embedding models, and reranker models. It is also applicable to multiple evaluation scenarios, such as end-to-end RAG evaluation, arena mode, and model inference performance stress testing. Moreover, with the seamless integration of the ms-swift training framework, evaluations can be initiated with a single click, providing full end-to-end support from model training to evaluation 🚀
 
 <p align="center">
 <img src="docs/en/_static/images/evalscope_framework.png" width="70%">
-<br>
+<br>EvalScope Framework.
 </p>
 
 The architecture includes the following modules:
@@ -62,14 +46,17 @@ The architecture includes the following modules:
 - **Native**: EvalScope’s own **default evaluation framework**, supporting various evaluation modes, including single model evaluation, arena mode, baseline model comparison mode, etc.
 - **OpenCompass**: Supports [OpenCompass](https://github.com/open-compass/opencompass) as the evaluation backend, providing advanced encapsulation and task simplification, allowing you to submit tasks for evaluation more easily.
 - **VLMEvalKit**: Supports [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) as the evaluation backend, enabling easy initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
+- **RAGEval**: Supports RAG evaluation, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
 - **ThirdParty**: Other third-party evaluation tasks, such as ToolBench.
 4. **Performance Evaluator**: Model performance evaluation, responsible for measuring model inference service performance, including performance testing, stress testing, performance report generation, and visualization.
 5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
 6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.
-</details>
 
 
 ## 🎉 News
+- 🔥 **[2024.10.31]** The best practice for evaluating Multimodal-RAG has been updated, please check the [📖 Blog](https://evalscope.readthedocs.io/zh-cn/latest/blog/RAG/multimodal_RAG.html#multimodal-rag) for more details.
+- 🔥 **[2024.10.23]** Supports multimodal RAG evaluation, including the assessment of image-text retrieval using [CLIP_Benchmark](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/clip_benchmark.html), and extends [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html) to support end-to-end multimodal metrics evaluation.
+- 🔥 **[2024.10.8]** Support for RAG evaluation, including independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
 - 🔥 **[2024.09.18]** Our documentation has been updated to include a blog module, featuring some technical research and discussions related to evaluations. We invite you to [📖 read it](https://evalscope.readthedocs.io/en/refact_readme/blog/index.html).
 - 🔥 **[2024.09.12]** Support for LongWriter evaluation, which supports 10,000+ word generation. You can use the benchmark [LongBench-Write](evalscope/third_party/longbench_write/README.md) to measure the long output quality as well as the output length.
 - 🔥 **[2024.08.30]** Support for custom dataset evaluations, including text datasets and multimodal image-text datasets.
@@ -240,9 +227,10 @@ run_task(task_cfg=your_task_cfg)
 ## Evaluation Backend
 EvalScope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
 - **Native**: EvalScope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
-- [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/opencompass_backend.html)
-- [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/vlmevalkit_backend.html)
-- **
+- [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
+- [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
+- **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
+- **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).
 
 ## Custom Dataset Evaluation
 EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset.html)
@@ -264,13 +252,10 @@ A stress testing tool that focuses on large language models and can be customize
 Refer to : Model Serving Performance Evaluation [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html)
 
 
-## Leaderboard
-The LLM Leaderboard aims to provide an objective and comprehensive evaluation standard and platform to help researchers and developers understand and compare the performance of models on various tasks on ModelScope.
-
-Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
-
 
 ## TO-DO List
+- [x] RAG evaluation
+- [x] VLM evaluation
 - [x] Agents evaluation
 - [x] vLLM
 - [ ] Distributed evaluating
@@ -282,3 +267,7 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
 - [ ] Auto-reviewer
 - [ ] Qwen-max
 
+
+## Star History
+
+[](https://star-history.com/#modelscope/evalscope&Date)
{evalscope-0.5.5rc1 → evalscope-0.6.0rc0}/evalscope/backend/opencompass/tasks/eval_datasets.py
RENAMED
@@ -50,6 +50,7 @@ with read_base():
     from opencompass.configs.datasets.nq.nq_gen_c788f6 import nq_datasets
     from opencompass.configs.datasets.triviaqa.triviaqa_gen_2121ce import triviaqa_datasets
     from opencompass.configs.datasets.cmb.cmb_gen_dfb5c4 import cmb_datasets
+    from opencompass.configs.datasets.cmmlu.cmmlu_gen_c13365 import cmmlu_datasets
     from opencompass.configs.datasets.bbh.bbh_gen_5b92b0 import bbh_datasets
 
     # Note: to be supported
evalscope-0.6.0rc0/evalscope/backend/rag_eval/__init__.py
ADDED

@@ -0,0 +1,4 @@
+from evalscope.backend.rag_eval.utils.embedding import EmbeddingModel
+from evalscope.backend.rag_eval.utils.llm import LLM, LocalLLM, ChatOpenAI
+from evalscope.backend.rag_eval.utils.clip import VisionModel
+from evalscope.backend.rag_eval.backend_manager import RAGEvalBackendManager
evalscope-0.6.0rc0/evalscope/backend/rag_eval/backend_manager.py
ADDED

@@ -0,0 +1,80 @@
+import os
+from typing import Optional, Union
+from evalscope.utils import is_module_installed, get_valid_list
+from evalscope.backend.base import BackendManager
+from evalscope.utils.logger import get_logger
+
+
+logger = get_logger()
+
+
+class RAGEvalBackendManager(BackendManager):
+    def __init__(self, config: Union[str, dict], **kwargs):
+        """BackendManager for VLM Evaluation Kit
+
+        Args:
+            config (Union[str, dict]): the configuration yaml-file or the configuration dictionary
+        """
+        super().__init__(config, **kwargs)
+
+    @staticmethod
+    def _check_env(module_name: str):
+        if is_module_installed(module_name):
+            logger.info(f"Check `{module_name}` Installed")
+        else:
+            logger.error(f"Please install `{module_name}` first")
+
+    @staticmethod
+    def run_mteb(model_args, eval_args):
+        from evalscope.backend.rag_eval.cmteb import ModelArguments, EvalArguments
+        from evalscope.backend.rag_eval.cmteb import one_stage_eval, two_stage_eval
+
+        if len(model_args) > 2:
+            raise ValueError("Not support multiple models yet")
+
+        # Convert arguments to dictionary
+        model_args_list = [ModelArguments(**args).to_dict() for args in model_args]
+        eval_args = EvalArguments(**eval_args).to_dict()
+
+        if len(model_args_list) == 1:
+            one_stage_eval(model_args_list[0], eval_args)
+        else:  # len(model_args_list) == 2
+            two_stage_eval(model_args_list[0], model_args_list[1], eval_args)
+
+    @staticmethod
+    def run_ragas(testset_args, eval_args):
+        from evalscope.backend.rag_eval.ragas import rag_eval
+        from evalscope.backend.rag_eval.ragas.tasks import generate_testset
+        from evalscope.backend.rag_eval.ragas import (
+            TestsetGenerationArguments,
+            EvaluationArguments,
+        )
+
+        if testset_args is not None:
+            generate_testset(TestsetGenerationArguments(**testset_args))
+        if eval_args is not None:
+            rag_eval(EvaluationArguments(**eval_args))
+
+    @staticmethod
+    def run_clip_benchmark(args):
+        from evalscope.backend.rag_eval.clip_benchmark import Arguments, evaluate
+
+        evaluate(Arguments(**args))
+
+    def run(self, *args, **kwargs):
+        tool = self.config_d.pop("tool")
+        if tool.lower() == "mteb":
+            self._check_env("mteb")
+            model_args = self.config_d["model"]
+            eval_args = self.config_d["eval"]
+            self.run_mteb(model_args, eval_args)
+        elif tool.lower() == "ragas":
+            self._check_env("ragas")
+            testset_args = self.config_d.get("testset_generation", None)
+            eval_args = self.config_d.get("eval", None)
+            self.run_ragas(testset_args, eval_args)
+        elif tool.lower() == "clip_benchmark":
+            self._check_env("webdataset")
+            self.run_clip_benchmark(self.config_d["eval"])
+        else:
+            raise ValueError(f"Unknown tool: {tool}")
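For orientation, run() above dispatches purely on the `tool` value and hands tool-specific sub-dicts to the matching runner: one entry in "model" goes through one_stage_eval, two entries through two_stage_eval, and the RAGAS blocks may each be omitted. A minimal sketch of the three config shapes, assuming a plain dict config as accepted by __init__; only the top-level keys come from this file, every nested field belongs to argument classes defined elsewhere and is left as a placeholder:

from evalscope.backend.rag_eval import RAGEvalBackendManager

# Top-level keys mirror the branches in run(); nested values are placeholders.
mteb_cfg = {"tool": "MTEB", "model": [{}], "eval": {}}                 # 1 model -> one_stage_eval, 2 -> two_stage_eval
ragas_cfg = {"tool": "RAGAS", "testset_generation": None, "eval": {}}  # either block may be None and is skipped
clip_cfg = {"tool": "clip_benchmark", "eval": {}}                      # "eval" is unpacked into Arguments (next file)

# A filled-in config would then be dispatched as, e.g.:
# RAGEvalBackendManager(config=clip_cfg).run()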
evalscope-0.6.0rc0/evalscope/backend/rag_eval/clip_benchmark/arguments.py
ADDED

@@ -0,0 +1,34 @@
+from dataclasses import dataclass, field
+from typing import List, Dict
+
+
+@dataclass
+class Arguments:
+    # fmt: off
+    """
+    A dataclass to store and manage the arguments for the model configuration and data processing.
+    """
+    """
+    For CLIP model support, you can use the following fields:
+        model_name: str
+        revision: str = "master"
+        hub: str = "modelscope"
+
+    For API VLM model support, you can use the following fields, (image caption only):
+        model_name="gpt-4o-mini"
+        api_base: str = "",
+        api_key: Optional[str] = None
+        prompt: str = None
+    """
+    models: List[Dict] = field(default_factory=dict)  # List of paths to the pre-trained models or model identifiers
+    dataset_name: List[str] = field(default_factory=list)  # List of dataset names to be used
+    data_dir: str = None  # Root directory where the datasets are stored
+    split: str = "test"  # Split of the dataset to be used (e.g., 'train', 'validation', 'test')
+    task: str = None
+    batch_size: int = 128  # Batch size for data loading
+    num_workers: int = 1  # Number of workers for data loading
+    verbose: bool = True  # Flag to enable verbose logging
+    output_dir: str = "outputs"  # Directory where the outputs (e.g., predictions, logs) will be saved
+    cache_dir: str = "cache"  # Directory where the dataset cache will be stored
+    skip_existing: bool = False  # Flag to skip processing if outputs already exist
+    limit: int = None  # Limit the number of samples to be processed
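The docstring above sketches two per-model shapes (a local CLIP checkpoint versus an API vision-language model used for image caption only); both are passed as entries of `models`, and run_clip_benchmark in the backend manager simply unpacks the "eval" block of the config into this dataclass. A hedged instantiation sketch using only the fields defined above; the model, dataset, and task values are placeholders or assumptions, not values taken from this diff:

from evalscope.backend.rag_eval.clip_benchmark import Arguments

# Field names come from the dataclass above; concrete values are illustrative.
args = Arguments(
    models=[
        {"model_name": "<clip-model-id>", "revision": "master", "hub": "modelscope"},  # local CLIP model
        # {"model_name": "gpt-4o-mini", "api_base": "<url>", "api_key": "<key>"},      # API VLM (image caption only)
    ],
    dataset_name=["<image-text-dataset>"],
    split="test",
    task="zeroshot_retrieval",  # assumed task label, matching tasks/zeroshot_retrieval.py added in this release
    batch_size=64,
    num_workers=2,
    output_dir="outputs",
    limit=1000,
)

The clip_benchmark package's evaluate function (imported alongside Arguments in run_clip_benchmark above) would then consume such an object.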