evalscope 1.0.0__tar.gz → 1.0.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {evalscope-1.0.0/evalscope.egg-info → evalscope-1.0.2}/PKG-INFO +10 -7
- {evalscope-1.0.0 → evalscope-1.0.2}/README.md +9 -6
- evalscope-1.0.2/evalscope/api/benchmark/__init__.py +3 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/adapters/__init__.py +2 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/adapters/default_data_adapter.py +7 -4
- evalscope-1.0.2/evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
- evalscope-1.0.2/evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/benchmark.py +62 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/meta.py +9 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/dataset/dataset.py +6 -6
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/dataset/loader.py +2 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/evaluator/cache.py +24 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/evaluator/evaluator.py +5 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/evaluator/state.py +17 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/messages/__init__.py +1 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/messages/chat_message.py +52 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/metric/scorer.py +15 -7
- evalscope-1.0.2/evalscope/api/mixin/__init__.py +2 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/mixin/llm_judge_mixin.py +2 -0
- evalscope-1.0.2/evalscope/api/mixin/sandbox_mixin.py +204 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/model/generate_config.py +1 -6
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/model/model.py +5 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/tool/tool_info.py +1 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/app.py +3 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/ui/single_model.py +3 -3
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/utils/data_utils.py +7 -7
- evalscope-1.0.2/evalscope/app/utils/env_utils.py +12 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/utils/text_utils.py +14 -12
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/arguments.py +8 -4
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/opencompass/backend_manager.py +0 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/utils/embedding.py +9 -1
- evalscope-1.0.2/evalscope/benchmarks/ai2d/ai2d_adapter.py +53 -0
- evalscope-1.0.2/evalscope/benchmarks/amc/amc_adapter.py +46 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bfcl/bfcl_adapter.py +142 -7
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bfcl/generation.py +9 -9
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/data_collection/data_collection_adapter.py +23 -19
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/drop/drop_adapter.py +1 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/frames/frames_adapter.py +2 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/general_arena/general_arena_adapter.py +5 -1
- evalscope-1.0.2/evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope-1.0.2/evalscope/benchmarks/healthbench/utils.py +102 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
- evalscope-1.0.2/evalscope/benchmarks/humaneval/utils.py +235 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope-1.0.2/evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope-1.0.2/evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope-1.0.2/evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
- evalscope-1.0.2/evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
- evalscope-1.0.2/evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
- evalscope-1.0.2/evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
- evalscope-1.0.2/evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope-1.0.2/evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope-1.0.2/evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope-1.0.2/evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope-1.0.2/evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope-1.0.2/evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope-1.0.2/evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +6 -5
- evalscope-1.0.2/evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope-1.0.2/evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope-1.0.2/evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models → evalscope-1.0.2/evalscope/benchmarks/process_bench}/__init__.py +0 -0
- evalscope-1.0.2/evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/tau_bench/generation.py +1 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/tau_bench/tau_bench_adapter.py +20 -19
- evalscope-1.0.2/evalscope/benchmarks/text2image/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.2/evalscope/benchmarks/text2image}/evalmuse_adapter.py +3 -1
- {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.2/evalscope/benchmarks/text2image}/genai_bench_adapter.py +2 -2
- {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.2/evalscope/benchmarks/text2image}/general_t2i_adapter.py +1 -1
- {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.2/evalscope/benchmarks/text2image}/hpdv2_adapter.py +7 -2
- {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.2/evalscope/benchmarks/text2image}/tifa_adapter.py +1 -0
- evalscope-1.0.2/evalscope/benchmarks/tool_bench/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
- evalscope-1.0.2/evalscope/benchmarks/winogrande/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/cli/start_app.py +7 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/cli/start_perf.py +7 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/config.py +96 -14
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/constants.py +11 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/evaluator/evaluator.py +30 -10
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/llm_judge.py +19 -7
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/metric.py +27 -2
- evalscope-1.0.2/evalscope/metrics/t2v_metrics/__init__.py +0 -0
- evalscope-1.0.2/evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
- evalscope-1.0.2/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
- evalscope-1.0.2/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
- evalscope-1.0.2/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
- evalscope-1.0.2/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
- evalscope-1.0.2/evalscope/models/image_edit_model.py +125 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/models/model_apis.py +22 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/models/openai_compatible.py +3 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/models/text2image_model.py +2 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/models/utils/openai.py +8 -6
- evalscope-1.0.2/evalscope/perf/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/arguments.py +2 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/benchmark.py +2 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/api/base.py +2 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/api/default_api.py +7 -7
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/api/openai_api.py +83 -19
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/flickr8k.py +2 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
- evalscope-1.0.2/evalscope/perf/utils/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/utils/benchmark_util.py +7 -5
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/utils/local_server.py +3 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/report/__init__.py +0 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/report/combinator.py +0 -25
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/report/generator.py +8 -87
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/report/report.py +8 -4
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/run.py +9 -5
- evalscope-1.0.2/evalscope/third_party/thinkbench/tools/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/chat_service.py +1 -1
- evalscope-1.0.2/evalscope/utils/function_utils.py +70 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/import_utils.py +73 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/io_utils.py +56 -7
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/json_schema.py +23 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/logger.py +19 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/model_utils.py +4 -3
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/multi_choices.py +23 -6
- evalscope-1.0.2/evalscope/version.py +4 -0
- {evalscope-1.0.0 → evalscope-1.0.2/evalscope.egg-info}/PKG-INFO +10 -7
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope.egg-info/SOURCES.txt +54 -12
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope.egg-info/requires.txt +7 -17
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements/aigc.txt +1 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements/dev.txt +0 -2
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements/framework.txt +1 -5
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements/perf.txt +1 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements/rag.txt +1 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/setup.py +0 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/benchmark/test_eval.py +80 -37
- evalscope-1.0.2/tests/benchmark/test_image_edit.py +65 -0
- evalscope-1.0.2/tests/benchmark/test_sandbox.py +81 -0
- evalscope-1.0.2/tests/benchmark/test_vlm.py +137 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/cli/test_all.py +83 -43
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/cli/test_collection.py +8 -5
- evalscope-1.0.2/tests/cli/test_reasoning.py +81 -0
- evalscope-1.0.2/tests/common.py +73 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/perf/test_perf.py +44 -14
- evalscope-1.0.2/tests/rag/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/rag/test_clip_benchmark.py +0 -3
- evalscope-1.0.0/evalscope/api/benchmark/__init__.py +0 -3
- evalscope-1.0.0/evalscope/api/mixin/__init__.py +0 -2
- evalscope-1.0.0/evalscope/api/mixin/dataset_mixin.py +0 -105
- evalscope-1.0.0/evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
- evalscope-1.0.0/evalscope/utils/function_utils.py +0 -29
- evalscope-1.0.0/evalscope/version.py +0 -4
- evalscope-1.0.0/tests/vlm/__init__.py +0 -1
- {evalscope-1.0.0 → evalscope-1.0.2}/LICENSE +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/MANIFEST.in +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/dataset/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/dataset/utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/evaluator/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/filter/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/filter/filter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/messages/content.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/messages/utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/metric/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/metric/metric.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/model/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/model/model_output.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/registry.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/tool/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/tool/tool_call.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/tool/utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/arguments.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/constants.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/ui/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/ui/app_ui.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/ui/multi_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/ui/sidebar.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/ui/visualization.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/utils/localization.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/app/utils/visualization.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/base.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/opencompass/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/opencompass/api_meta_template.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/opencompass/tasks/eval_datasets.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/backend_manager.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/arguments.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/task_template.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/tasks/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/utils/webdataset_convert.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/clip_benchmark/utils/webdatasets.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/arguments.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/base.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/task_template.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/tasks/STS.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/ragas/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/ragas/arguments.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/ragas/task_template.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/ragas/tasks/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/ragas/tasks/build_transform.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/utils/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/utils/clip.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/utils/llm.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/rag_eval/utils/tools.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/backend/vlm_eval_kit/backend_manager.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/aigc → evalscope-1.0.2/evalscope/benchmarks/ai2d}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/aigc/i2i → evalscope-1.0.2/evalscope/benchmarks/aime}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/aime/aime24_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/aime/aime25_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.2/evalscope/benchmarks/alpaca_eval}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/aime → evalscope-1.0.2/evalscope/benchmarks/amc}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/arc/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/arc/arc_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/alpaca_eval → evalscope-1.0.2/evalscope/benchmarks/arena_hard}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/arena_hard/arena_hard_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/arena_hard/utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/arena_hard → evalscope-1.0.2/evalscope/benchmarks/bfcl}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/ceval/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/bfcl → evalscope-1.0.2/evalscope/benchmarks/chinese_simple_qa}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/competition_math/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/chinese_simple_qa → evalscope-1.0.2/evalscope/benchmarks/data_collection}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/data_collection → evalscope-1.0.2/evalscope/benchmarks/docmath}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/docmath/docmath_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/docmath/utils.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/docmath → evalscope-1.0.2/evalscope/benchmarks/drop}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/drop/utils.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/drop → evalscope-1.0.2/evalscope/benchmarks/frames}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/frames/utils.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/frames → evalscope-1.0.2/evalscope/benchmarks/general_arena}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/general_arena/utils.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/general_arena → evalscope-1.0.2/evalscope/benchmarks/general_mcq}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/general_mcq/general_mcq_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/general_qa/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/general_mcq → evalscope-1.0.2/evalscope/benchmarks/gpqa}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/gpqa/gpqa_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/gpqa/prompt.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/gpqa → evalscope-1.0.2/evalscope/benchmarks/healthbench}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/hle/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/hle/hle_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/humaneval/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/ifeval/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/ifeval/ifeval_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/ifeval/instructions.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/ifeval/instructions_registry.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/ifeval/utils.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/iquiz → evalscope-1.0.2/evalscope/benchmarks/image_edit}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/live_code_bench → evalscope-1.0.2/evalscope/benchmarks/image_edit/gedit}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/maritime_bench → evalscope-1.0.2/evalscope/benchmarks/iquiz}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/iquiz/iquiz_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/math_500 → evalscope-1.0.2/evalscope/benchmarks/live_code_bench}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/live_code_bench/extract_utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/live_code_bench/load_utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/live_code_bench/pass_k_utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/live_code_bench/prompts.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/live_code_bench/testing_util.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/mmlu_pro → evalscope-1.0.2/evalscope/benchmarks/maritime_bench}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/mmlu_redux → evalscope-1.0.2/evalscope/benchmarks/math_500}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/musr → evalscope-1.0.2/evalscope/benchmarks/math_vista}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/needle_haystack → evalscope-1.0.2/evalscope/benchmarks/minerva_math}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/process_bench → evalscope-1.0.2/evalscope/benchmarks/mm_bench}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/simple_qa → evalscope-1.0.2/evalscope/benchmarks/mm_star}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/mmlu/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/super_gpqa → evalscope-1.0.2/evalscope/benchmarks/mmlu_pro}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/tau_bench → evalscope-1.0.2/evalscope/benchmarks/mmlu_redux}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/tool_bench → evalscope-1.0.2/evalscope/benchmarks/mmmu}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/benchmarks/winogrande → evalscope-1.0.2/evalscope/benchmarks/mmmu_pro}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/metrics/t2v_metrics → evalscope-1.0.2/evalscope/benchmarks/multi_if}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models → evalscope-1.0.2/evalscope/benchmarks/musr}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/musr/musr_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model → evalscope-1.0.2/evalscope/benchmarks/needle_haystack}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/needle_haystack/utils.py +0 -0
- {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward → evalscope-1.0.2/evalscope/benchmarks/olympiad_bench}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5 → evalscope-1.0.2/evalscope/benchmarks/omni_bench}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/process_bench/process_bench_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/race/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/race/race_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/perf → evalscope-1.0.2/evalscope/benchmarks/real_world_qa}/__init__.py +0 -0
- {evalscope-1.0.0/evalscope/perf/utils → evalscope-1.0.2/evalscope/benchmarks/simple_qa}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/simple_qa/simple_qa_adapter.py +0 -0
- {evalscope-1.0.0/evalscope/third_party/thinkbench/tools → evalscope-1.0.2/evalscope/benchmarks/super_gpqa}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/super_gpqa/prompt.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/super_gpqa/utils.py +0 -0
- {evalscope-1.0.0/tests/rag → evalscope-1.0.2/evalscope/benchmarks/tau_bench}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/tool_bench/tool_bench_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/tool_bench/utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/trivia_qa/samples.jsonl +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/benchmarks/winogrande/winogrande_adapter.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/cli/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/cli/base.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/cli/cli.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/cli/start_eval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/cli/start_server.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/collections/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/collections/sampler.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/collections/schema.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/evaluator/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/filters/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/filters/extraction.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/filters/selection.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/math_parser.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/metrics.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/rouge_metric.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/clipscore.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/constants.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/itmscore.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/score.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/metrics/t2v_metrics/vqascore.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/models/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/models/mockllm.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/models/modelscope.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/http_client.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/main.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/api/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/api/custom_api.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/api/dashscope_api.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/base.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/custom.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/line_by_line.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/longalpaca.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/openqa.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/random_dataset.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/datasets/speed_benchmark.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/plugin/registry.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/utils/analysis_result.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/utils/db_util.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/utils/handler.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/utils/log_utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/perf/utils/rich_display.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/summarizer.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/README.md +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/default_task.json +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/default_task.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/eval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/infer.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/resources/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/tools/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/tools/openai_api.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/longbench_write/utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/thinkbench/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/thinkbench/eval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/thinkbench/infer.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/thinkbench/resources/critique_template.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/thinkbench/resources/reformat_template.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/thinkbench/tools/llm.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/thinkbench/tools/utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/README.md +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/config_default.json +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/config_default.yaml +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/eval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/infer.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/llm/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/requirements.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/argument_utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/deprecation_utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope/utils/url_utils.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope.egg-info/dependency_links.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope.egg-info/entry_points.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope.egg-info/not-zip-safe +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/evalscope.egg-info/top_level.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements/app.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements/docs.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements/opencompass.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements/vlmeval.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/requirements.txt +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/setup.cfg +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/__init__.py +0 -0
- {evalscope-1.0.0/tests/aigc → evalscope-1.0.2/tests/benchmark}/__init__.py +0 -0
- {evalscope-1.0.0/tests/aigc → evalscope-1.0.2/tests/benchmark}/test_t2i.py +0 -0
- {evalscope-1.0.0/tests/benchmark → evalscope-1.0.2/tests/cli}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/cli/test_custom.py +0 -0
- {evalscope-1.0.0/tests/cli → evalscope-1.0.2/tests/perf}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/rag/test_mteb.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/rag/test_ragas.py +0 -0
- {evalscope-1.0.0/tests/perf → evalscope-1.0.2/tests/swift}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/swift/test_run_swift_eval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/swift/test_run_swift_vlm_eval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/swift/test_run_swift_vlm_jugde_eval.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/test_run_all.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/utils.py +0 -0
- {evalscope-1.0.0/tests/swift → evalscope-1.0.2/tests/vlm}/__init__.py +0 -0
- {evalscope-1.0.0 → evalscope-1.0.2}/tests/vlm/test_vlmeval.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: evalscope
-Version: 1.0.0
+Version: 1.0.2
 Summary: EvalScope: Lightweight LLMs Evaluation Framework
 Home-page: https://github.com/modelscope/evalscope
 Author: ModelScope team
@@ -146,7 +146,10 @@ Please scan the QR code below to join our community groups:
 >
 > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.

-- 🔥 **[2025.
+- 🔥 **[2025.09.19]** Added support for multimodal image-text evaluation benchmarks including RealWorldQA, AI2D, MMStar, MMBench, and OmniBench, as well as pure text evaluation benchmarks such as Multi-IF, HealthBench, and AMC.
+- 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
+- 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
+- 🔥 **[2025.08.22]** Version 1.0 Refactoring. Break changes, please [refer to](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
 - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
 - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
 - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
@@ -154,7 +157,7 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
 - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
 - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
-- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
+- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
 - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
 - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
 <details><summary>More</summary>
@@ -163,7 +166,7 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
 - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
 - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
-- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
|
|
167
170
|
- 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
|
|
168
171
|
- 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
|
|
169
172
|
- 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
|
|
@@ -362,7 +365,7 @@ run_task(task_cfg="config.json")
|
|
|
362
365
|
|
|
363
366
|
### Basic Parameter
|
|
364
367
|
- `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
|
|
365
|
-
- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
|
|
368
|
+
- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
|
|
366
369
|
- `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
|
|
367
370
|
|
|
368
371
|
### Output Results
|
|
@@ -451,7 +454,7 @@ For more customized evaluations, such as customizing model parameters or dataset
|
|
|
451
454
|
evalscope eval \
|
|
452
455
|
--model Qwen/Qwen3-0.6B \
|
|
453
456
|
--model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
|
|
454
|
-
--generation-config '{"do_sample":true,"temperature":0.6,"
|
|
457
|
+
--generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
|
|
455
458
|
--dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
|
|
456
459
|
--datasets gsm8k \
|
|
457
460
|
--limit 10
|
|
@@ -465,7 +468,7 @@ evalscope eval \
|
|
|
465
468
|
- `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
|
|
466
469
|
- `do_sample`: Whether to use sampling
|
|
467
470
|
- `temperature`: Generation temperature
|
|
468
|
-
- `
|
|
471
|
+
- `max_tokens`: Maximum length of generated tokens
|
|
469
472
|
- `chat_template_kwargs`: Model inference template parameters
|
|
470
473
|
- `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
|
|
471
474
|
- `few_shot_num`: Number of few-shot examples
|
|
{evalscope-1.0.0 → evalscope-1.0.2}/README.md
RENAMED
@@ -117,7 +117,10 @@ Please scan the QR code below to join our community groups:
 >
 > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
 
-- 🔥 **[2025.
+- 🔥 **[2025.09.19]** Added support for multimodal image-text evaluation benchmarks including RealWorldQA, AI2D, MMStar, MMBench, and OmniBench, as well as pure text evaluation benchmarks such as Multi-IF, HealthBench, and AMC.
+- 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
+- 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
+- 🔥 **[2025.08.22]** Version 1.0 refactoring with breaking changes; please [refer to the documentation](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
 - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
 - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
 - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
@@ -125,7 +128,7 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
 - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
 - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
-- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
+- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
 - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
 - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
 <details><summary>More</summary>
@@ -134,7 +137,7 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
 - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
 - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
-- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
 - 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
 - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
 - 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
@@ -333,7 +336,7 @@ run_task(task_cfg="config.json")
 
 ### Basic Parameter
 - `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
-- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
 - `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
 
 ### Output Results
@@ -422,7 +425,7 @@ For more customized evaluations, such as customizing model parameters or dataset
 evalscope eval \
  --model Qwen/Qwen3-0.6B \
  --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
- --generation-config '{"do_sample":true,"temperature":0.6,"
+ --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
 --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
 --datasets gsm8k \
 --limit 10
@@ -436,7 +439,7 @@ evalscope eval \
 - `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
   - `do_sample`: Whether to use sampling
   - `temperature`: Generation temperature
-  - `
+  - `max_tokens`: Maximum length of generated tokens
   - `chat_template_kwargs`: Model inference template parameters
 - `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
   - `few_shot_num`: Number of few-shot examples
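For readers who prefer the Python API over the CLI call shown in the diff above, the same run can be expressed with `run_task`, which the README already references via `run_task(task_cfg="config.json")`. This is a minimal sketch only: it assumes the `TaskConfig` object accepts the same field names as the CLI flags (`model`, `datasets`, `dataset_args`, `generation_config`, `limit`); check the evalscope documentation for the exact signature.

```python
# Minimal sketch of a Python-API equivalent of the CLI example above.
# Assumption: TaskConfig mirrors the CLI flags one-to-one; verify against the docs.
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='Qwen/Qwen3-0.6B',
    datasets=['gsm8k'],
    dataset_args={'gsm8k': {'few_shot_num': 0, 'few_shot_random': False}},
    generation_config={
        'do_sample': True,
        'temperature': 0.6,
        'max_tokens': 512,  # maximum number of generated tokens
        'chat_template_kwargs': {'enable_thinking': False},  # inference template switches
    },
    limit=10,  # evaluate only the first 10 samples for a quick check
)
run_task(task_cfg=task_cfg)
```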
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/adapters/default_data_adapter.py
RENAMED
@@ -241,6 +241,7 @@ class DefaultDataAdapter(DataAdapter):
             filter_func=self.sample_filter,
             limit=self.limit if not self.reformat_subset else None,  # Limit number of samples if specified
             repeats=self.repeats,  # Number of repetitions for each sample
+            shuffle=self.shuffle,  # Shuffle dataset if enabled
             shuffle_choices=self.shuffle_choices,  # Shuffle choices if requested
             data_source=self.dataset_hub,  # Data source configuration
         )
@@ -641,9 +642,7 @@ class DefaultDataAdapter(DataAdapter):
         """
         pass
 
-    def _on_generate_report(
-        self, scores: Dict[str, List[AggScore]], model_name: str, add_aggregation_name: bool = True
-    ) -> Report:
+    def _on_generate_report(self, scores: Dict[str, List[AggScore]], model_name: str) -> Report:
         """
         Hook method called during report generation.
 
@@ -659,7 +658,7 @@ class DefaultDataAdapter(DataAdapter):
             Report: The generated evaluation report
         """
         return ReportGenerator.generate_report(
-            score_dict=scores, model_name=model_name, data_adapter=self, add_aggregation_name=add_aggregation_name
+            score_dict=scores, model_name=model_name, data_adapter=self, add_aggregation_name=self.add_aggregation_name
         )
 
     @override
@@ -681,3 +680,7 @@ class DefaultDataAdapter(DataAdapter):
         report = self._on_generate_report(scores, model_name=model_name)
         self._on_generate_report_end(report, output_dir, **kwargs)
         return report
+
+    def finalize(self, *args, **kwargs):
+        # Finalize the evaluation process
+        self.sandbox_finalize(*args, **kwargs)
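The hunks above move report options onto the adapter instance (`self.add_aggregation_name`) and add a `finalize` hook that delegates to the sandbox mixin. A hypothetical subclass might interact with them as sketched below; the class name and the choice to disable aggregation names are illustrative only and are not part of evalscope.

```python
# Illustrative only: how a custom adapter could use the new hooks.
from evalscope.api.benchmark.adapters.default_data_adapter import DefaultDataAdapter


class MyCustomAdapter(DefaultDataAdapter):  # hypothetical adapter, not shipped with evalscope

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Report options are now plain attributes instead of arguments
        # threaded through _on_generate_report.
        self.add_aggregation_name = False

    def finalize(self, *args, **kwargs):
        # Clean up any per-benchmark resources, then let the base class
        # release sandbox resources via SandboxMixin.sandbox_finalize.
        super().finalize(*args, **kwargs)
```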
evalscope-1.0.2/evalscope/api/benchmark/adapters/image_edit_adapter.py
ADDED
@@ -0,0 +1,82 @@
+import os
+from typing import Optional
+
+from evalscope.constants import EvalType, FileConstants
+from evalscope.utils import get_logger
+from evalscope.utils.function_utils import thread_safe
+from evalscope.utils.io_utils import jsonl_to_list
+from .text2image_adapter import Text2ImageAdapter
+
+logger = get_logger()
+
+
+class ImageEditAdapter(Text2ImageAdapter):
+    """
+    Support two methods:
+    1. Inference using modelscope pipeline
+    2. Load local inference jsonl file with key to corresponding prompt
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.local_file = self.extra_params.get('local_file', None)
+        self.id_key = self.extra_params.get('id_key', FileConstants.ID)
+        self.image_key = self.extra_params.get('image_key', FileConstants.IMAGE_PATH)
+        self.local_data = self.load_local_file()
+
+    def load_local_file(self) -> Optional[dict]:
+        if not self.local_file:
+            return None
+
+        # Load file and check
+        data_list = jsonl_to_list(self.local_file)
+        data_dict = {}
+        for record in data_list:
+            if self.image_key not in record:
+                raise ValueError(f"Image key '{self.image_key}' not found in record: {record}, file {self.local_file}")
+            if self.id_key not in record:
+                raise ValueError(f"ID key '{self.id_key}' not found in record: {record}, file {self.local_file}")
+
+            image_path = record[self.image_key]
+            if not os.path.isabs(image_path):
+                image_path = os.path.join(os.path.dirname(self.local_file), image_path)
+            if not os.path.exists(image_path):
+                raise FileNotFoundError(f"Image file '{image_path}' not found.")
+
+            data_dict[record[self.id_key]] = record
+        return data_dict
+
+    def get_image_path_from_id(self, image_id) -> Optional[str]:
+        if not self.local_file:
+            return None
+
+        record = self.local_data.get(image_id)
+        if not record:
+            return None
+
+        return record[self.image_key]
+
+    def _post_process_samples(self):
+        super()._post_process_samples()
+
+        # Add local image path if exists
+        for subset in self.test_dataset.keys():
+            for sample in self.test_dataset[subset]:
+                local_image_path = self.get_image_path_from_id(sample.metadata.get(FileConstants.ID))
+                if local_image_path:
+                    sample.metadata[FileConstants.IMAGE_PATH] = local_image_path
+
+    def sample_filter(self, sample) -> bool:
+        """
+        Filter samples based on metadata availability.
+        If local file is not available, all samples are considered valid.
+        Otherwise, only samples with valid metadata and image path are kept.
+        """
+        if not self.local_data:
+            return True
+        else:
+            sample_id = sample.metadata.get(FileConstants.ID)
+            if (not sample_id) or (not self.get_image_path_from_id(sample_id)):
+                return False
+            return True
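`ImageEditAdapter` can score images that were generated offline: `extra_params['local_file']` points at a JSONL file whose records carry a sample id and the path of the edited image. The sketch below shows one way to produce such a file; the literal key names `id` and `image_path` are assumed to match the `FileConstants` defaults, and the file name is arbitrary.

```python
# Sketch: write a JSONL file that ImageEditAdapter.load_local_file() would accept.
# 'id' and 'image_path' are assumed to match FileConstants.ID / FileConstants.IMAGE_PATH;
# they can also be overridden via extra_params['id_key'] / extra_params['image_key'].
import json

records = [
    {'id': 'gedit_0001', 'image_path': 'images/gedit_0001.png'},
    {'id': 'gedit_0002', 'image_path': 'images/gedit_0002.png'},
]
with open('local_edits.jsonl', 'w', encoding='utf-8') as f:
    for record in records:
        f.write(json.dumps(record, ensure_ascii=False) + '\n')

# The adapter would then be configured with extra_params such as:
# {'local_file': 'local_edits.jsonl', 'id_key': 'id', 'image_key': 'image_path'}
```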
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/adapters/multi_choice_adapter.py
RENAMED
@@ -18,8 +18,11 @@ class MultiChoiceAdapter(DefaultDataAdapter):
     This adapter formats the input for multi-choice questions and handles few-shot examples.
     """
 
-
-
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.multiple_correct: bool = False
+        """Whether the benchmark allows multiple correct answers."""
 
     def format_prompt_template(self, sample: Sample) -> str:
         """
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/adapters/text2image_adapter.py
RENAMED
@@ -8,7 +8,7 @@ from evalscope.api.messages.content import ContentImage
 from evalscope.api.metric import Score
 from evalscope.api.model import ChatCompletionChoice, Model, ModelOutput
 from evalscope.api.registry import get_metric
-from evalscope.constants import EvalType
+from evalscope.constants import EvalType, FileConstants
 from evalscope.utils import get_logger
 from evalscope.utils.function_utils import thread_safe
 from .default_data_adapter import DefaultDataAdapter
@@ -19,6 +19,11 @@ logger = get_logger()
 class Text2ImageAdapter(DefaultDataAdapter):
     """Text to Image Adapter for benchmarks."""
 
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.add_aggregation_name = False  # Do not add aggregation name in the report by default
+
     def load_from_disk(self, **kwargs):
         return super().load_from_disk(use_local_loader=True)
 
@@ -27,11 +32,12 @@ class Text2ImageAdapter(DefaultDataAdapter):
         return Sample(
             input=[ChatMessageUser(content=record['prompt'])],
             metadata={
-                'id': record['id'],
                 'prompt': record['prompt'],
                 'category': record.get('category', ''),
                 'tags': record.get('tags', []),
-
+                FileConstants.ID: record[FileConstants.ID],
+                FileConstants.IMAGE_PATH: record.get(FileConstants.IMAGE_PATH,
+                                                     ''),  # Optional field for existing image path
             }
         )
 
@@ -83,7 +89,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
                 completed=True,
             )
         else:
-            image_id = f
+            image_id = f'{sample.metadata.get(FileConstants.ID, sample.id)}_{sample.group_id}'
             output_path = os.path.join(output_dir, 'images', f'{image_id}.png')
             if not os.path.exists(os.path.dirname(output_path)):
                 os.makedirs(os.path.dirname(output_path))
@@ -96,7 +102,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
             with open(output_path, 'wb') as f:
                 f.write(base64.b64decode(image_base64))
 
-            sample.metadata[
+            sample.metadata[FileConstants.IMAGE_PATH] = output_path
             return TaskState(
                 model=model.name,
                 sample=sample,
@@ -111,7 +117,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
         self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
     ) -> Score:
         # Get prediction and prompt from task state
-        image_path = task_state.metadata.get(
+        image_path = task_state.metadata.get(FileConstants.IMAGE_PATH, original_prediction)
         prompt = task_state.input[0].content
         meta = task_state.metadata
 
@@ -149,7 +155,3 @@ class Text2ImageAdapter(DefaultDataAdapter):
                 score.metadata[metric_name] = f'error: {str(e)}'
 
         return score
-
-    def _on_generate_report(self, scores, model_name, add_aggregation_name=True):
-        # Don't add aggregation name for needle haystack adapter
-        return super()._on_generate_report(scores, model_name, False)
evalscope-1.0.2/evalscope/api/benchmark/adapters/vision_language_adapter.py
ADDED
@@ -0,0 +1,8 @@
+from .default_data_adapter import DefaultDataAdapter
+
+
+class VisionLanguageAdapter(DefaultDataAdapter):
+    """Adapter for vision-language benchmarks. e.g., image captioning, visual question answering, etc."""
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/benchmark.py
RENAMED
@@ -9,7 +9,7 @@ from evalscope.api.dataset import DatasetDict, Sample
 from evalscope.api.evaluator import TaskState
 from evalscope.api.filter import FilterEnsemble, build_filter_ensemble
 from evalscope.api.metric import AggScore, SampleScore
-from evalscope.api.mixin import LLMJudgeMixin
+from evalscope.api.mixin import LLMJudgeMixin, SandboxMixin
 from evalscope.api.model import Model
 from evalscope.report import Report
 from evalscope.utils.logger import get_logger
@@ -21,7 +21,7 @@ if TYPE_CHECKING:
 logger = get_logger()
 
 
-class DataAdapter(LLMJudgeMixin, ABC):
+class DataAdapter(LLMJudgeMixin, SandboxMixin, ABC):
     """
     Data Adapter for the benchmark.
     """
@@ -43,6 +43,12 @@ class DataAdapter(LLMJudgeMixin, ABC):
         self.save_metadata = True
         """Whether to save metadata in the review result"""
 
+        self.add_aggregation_name = True
+        """Whether to add aggregation name in the report"""
+
+        self.add_overall_metric = True
+        """Whether to add overall metric in the report"""
+
         self.category_map = {}
         """Category map for the benchmark"""
 
@@ -86,6 +92,11 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         pass
 
+    @abstractmethod
+    def finalize(self, *args, **kwargs) -> None:
+        """Finalize the evaluation process."""
+        pass
+
     @property
     def name(self) -> str:
         """
@@ -170,6 +181,13 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         return self._benchmark_meta.default_subset
 
+    @default_subset.setter
+    def default_subset(self, value: str):
+        """
+        Set the default subset of the benchmark.
+        """
+        self._benchmark_meta.default_subset = value
+
     @property
     def few_shot_num(self) -> int:
         """
@@ -299,6 +317,48 @@ class DataAdapter(LLMJudgeMixin, ABC):
         """
         return self._task_config.seed
 
+    @property
+    def shuffle(self) -> bool:
+        """
+        Return whether to shuffle the dataset before evaluation.
+        """
+        return self._benchmark_meta.shuffle
+
+    @shuffle.setter
+    def shuffle(self, value: bool):
+        """
+        Set whether to shuffle the dataset before evaluation.
+        """
+        self._benchmark_meta.shuffle = value
+
+    @property
+    def shuffle_choices(self) -> bool:
+        """
+        Return whether to shuffle the choices in multiple-choice datasets.
+        """
+        return self._benchmark_meta.shuffle_choices
+
+    @shuffle_choices.setter
+    def shuffle_choices(self, value: bool):
+        """
+        Set whether to shuffle the choices in multiple-choice datasets.
+        """
+        self._benchmark_meta.shuffle_choices = value
+
+    @property
+    def review_timeout(self) -> Optional[float]:
+        """
+        Return the timeout for the review process.
+        """
+        return self._benchmark_meta.review_timeout
+
+    @review_timeout.setter
+    def review_timeout(self, value: float):
+        """
+        Set the timeout for the review process.
+        """
+        self._benchmark_meta.review_timeout = value
+
     @contextlib.contextmanager
     def _temporary_attribute(self, attr_name: str, new_value):
         """
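The new `shuffle`, `shuffle_choices`, and `review_timeout` properties all follow the same pattern: the adapter exposes an attribute-like interface but reads and writes the shared `BenchmarkMeta`, so overrides stay visible to everything that holds the meta object. The toy classes below model that pattern only; they are not evalscope's real classes.

```python
# Toy model of the property delegation used by DataAdapter; illustration only.
from dataclasses import dataclass
from typing import Optional


@dataclass
class Meta:  # stand-in for BenchmarkMeta
    shuffle: bool = False
    review_timeout: Optional[float] = None


class Adapter:  # stand-in for DataAdapter
    def __init__(self, meta: Meta):
        self._meta = meta

    @property
    def shuffle(self) -> bool:
        return self._meta.shuffle

    @shuffle.setter
    def shuffle(self, value: bool) -> None:
        self._meta.shuffle = value


meta = Meta()
adapter = Adapter(meta)
adapter.shuffle = True
assert meta.shuffle is True  # the adapter and the meta object stay in sync
```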
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/benchmark/meta.py
RENAMED
@@ -73,6 +73,15 @@ class BenchmarkMeta:
     aggregation: str = 'mean'
     """ Aggregation function for the metrics. Default is 'mean'. Can be 'mean', 'pass@<k>' or a custom function name."""
 
+    shuffle: bool = False
+    """Whether to shuffle the dataset before evaluation."""
+
+    shuffle_choices: bool = False
+    """Whether to shuffle the choices in multiple-choice datasets."""
+
+    review_timeout: Optional[float] = None
+    """ Timeout for review in seconds."""
+
     extra_params: Dict = field(default_factory=dict)
     """ Additional parameters for the benchmark."""
 
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/dataset/dataset.py
RENAMED
@@ -5,9 +5,8 @@ from dataclasses import dataclass, field
 from pydantic import BaseModel, Field
 from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Union
 
-from evalscope.api.messages import ChatMessage,
+from evalscope.api.messages import ChatMessage, messages_to_markdown
 from evalscope.api.tool import ToolInfo
-from evalscope.utils.multi_choices import answer_character, answer_index
 
 
 class Sample(BaseModel):
@@ -31,9 +30,6 @@
     tools: Optional[List[ToolInfo]] = None
     """List of tools available to the model during inference (optional)."""
 
-    category: Optional[str] = None
-    """Category of the sample (optional)."""
-
     subset_key: Optional[str] = None
     """Key for the subset this sample belongs to, used for generating subsets (optional)."""
 
@@ -54,7 +50,7 @@
         if isinstance(self.input, str):
             input_text = self.input
         else:
-            input_text =
+            input_text = messages_to_markdown(self.input, max_length=50)
         return f'Sample ID: {self.id}\nInput: {input_text}\nTarget: {self.target}'
 
 
@@ -230,6 +226,8 @@ class MemoryDataset(Dataset):
         self._shuffled = True
 
     def shuffle_choices(self, seed: Optional[int] = None) -> None:
+        from evalscope.utils.multi_choices import answer_character
+
         rand = random.Random(seed)
         for sample in self.samples:
             if not sample.choices:
@@ -249,6 +247,8 @@
             sample.target = self._remap_target(sample.target, position_map=position_map)
 
     def _remap_target(self, target: Union[str, List[str]], position_map: Dict[int, str]) -> Union[str, List[str]]:
+        from evalscope.utils.multi_choices import answer_index
+
         if isinstance(target, list):
             return [position_map[answer_index(t)] for t in target]
         else:
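Shuffling multiple-choice options is only safe if the target letter is remapped along with the choices, which is what `shuffle_choices` and `_remap_target` do with `answer_index`/`answer_character`. The standalone sketch below illustrates the idea with simplified stand-ins for those helpers (A maps to 0, B to 1, and so on); the real implementations in `evalscope.utils.multi_choices` may differ in detail.

```python
# Standalone illustration of why shuffled choices require remapping the target.
import random
import string


def answer_index(char: str) -> int:
    """Simplified stand-in: 'A' -> 0, 'B' -> 1, ..."""
    return string.ascii_uppercase.index(char.upper())


def answer_character(index: int) -> str:
    """Simplified stand-in: 0 -> 'A', 1 -> 'B', ..."""
    return string.ascii_uppercase[index]


choices = ['Paris', 'London', 'Rome', 'Berlin']
target = 'A'  # the correct answer is 'Paris'

rand = random.Random(42)
order = list(range(len(choices)))
rand.shuffle(order)  # new position -> old position

# Map each old position to the letter of its new position.
position_map = {old: answer_character(new) for new, old in enumerate(order)}
shuffled_choices = [choices[old] for old in order]
new_target = position_map[answer_index(target)]

# The remapped letter still points at 'Paris' in the shuffled list.
assert shuffled_choices[answer_index(new_target)] == 'Paris'
```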
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/dataset/loader.py
RENAMED
@@ -126,7 +126,8 @@ class RemoteDataLoader(DataLoader):
             self.limit = int(len(dataset) * self.limit)
         elif isinstance(self.limit, int) and self.limit < 0:
             raise ValueError('Limit must be a non-negative integer or a float between 0 and 1.')
-        dataset
+        if len(dataset) > self.limit:
+            dataset = dataset.select(range(self.limit))
 
         # convert to list
         dataset = dataset.to_list()
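The change above guards the `select` call so a limit larger than the dataset no longer overruns it. The helper below mirrors that logic in isolation (fractional limits become a share of the dataset, integer limits are capped at its length); it is a simplified restatement for illustration, not the loader's actual code.

```python
# Simplified restatement of RemoteDataLoader's limit handling; illustration only.
def resolve_limit(limit, dataset_len: int) -> int:
    if isinstance(limit, float) and 0 <= limit <= 1:
        return int(dataset_len * limit)  # fraction of the dataset
    if isinstance(limit, int) and limit < 0:
        raise ValueError('Limit must be a non-negative integer or a float between 0 and 1.')
    return min(limit, dataset_len)  # never select past the end of the dataset


assert resolve_limit(0.1, 1000) == 100    # 10% of 1000 rows
assert resolve_limit(2000, 1000) == 1000  # capped at the dataset size
```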
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/evaluator/cache.py
RENAMED
@@ -299,6 +299,15 @@ class ModelResult(BaseModel):
             completed=True,  # Mark as completed since it was cached
         )
 
+    def pretty_print(self) -> str:
+        """
+        Generate a pretty-printed string representation of the model result.
+
+        Returns:
+            A string representation of the model result
+        """
+        return self.model_dump_json(indent=2)
+
 
 class ReviewResult(BaseModel):
     """
@@ -340,7 +349,7 @@ class ReviewResult(BaseModel):
 
         return cls(
             index=state.sample_id,
-            input=state.
+            input=state.input_markdown,
             target=state.target,
             sample_score=sample_score,
         )
@@ -353,3 +362,17 @@
             The sample score object
         """
         return self.sample_score
+
+    def pretty_print(self) -> str:
+        """
+        Generate a pretty-printed string representation of the review result.
+
+        Returns:
+            A string representation of the review result
+        """
+        output = [
+            f'Review Result for Sample {self.index}:',
+            f'Target: {self.target}',
+            f'Score: {self.sample_score.model_dump_json(indent=2)}',
+        ]
+        return '\n'.join(output)
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/evaluator/state.py
RENAMED
@@ -3,7 +3,7 @@ from random import Random
 from typing import Any, Dict, List, Optional, Sequence, Union, overload
 
 from evalscope.api.dataset import Sample
-from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str
+from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str, messages_to_markdown
 from evalscope.api.model import ModelOutput
 
 
@@ -188,6 +188,17 @@ class TaskState:
         else:
             return messages_pretty_str(self._input)
 
+    @property
+    def input_markdown(self) -> str:
+        """Get the input text as markdown.
+
+        For multi-modal content, images will be represented in markdown format.
+        """
+        if isinstance(self._input, str):
+            return self._input
+        else:
+            return messages_to_markdown(self._input)
+
     @property
     def choices(self) -> Choices:
         """Choices for the sample, if applicable."""
@@ -262,3 +273,8 @@
     def target(self) -> str:
         """The scoring target for this `Sample`."""
         return self._target.text
+
+    @target.setter
+    def target(self, text: str) -> None:
+        """Set the target for review purposes."""
+        self._target = Target(text)
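`input_markdown` exists so that cached review records can show multimodal prompts in a readable way: text stays text and image content becomes a markdown image reference. The snippet below is a simplified stand-in that works on plain dicts to illustrate the shape of such output; `messages_to_markdown` in `evalscope.api.messages` operates on `ChatMessage` objects and may format details differently.

```python
# Simplified stand-in for messages_to_markdown; illustration only.
from typing import Dict, List, Union

Content = Union[str, List[Dict[str, str]]]


def to_markdown(messages: List[Dict[str, Content]]) -> str:
    lines = []
    for message in messages:
        content = message['content']
        if isinstance(content, str):
            rendered = content
        else:
            parts = []
            for item in content:
                if item.get('type') == 'image':
                    parts.append(f"![image]({item.get('image', '')})")  # image -> markdown reference
                else:
                    parts.append(item.get('text', ''))
            rendered = ' '.join(parts)
        lines.append(f"**{message['role']}**: {rendered}")
    return '\n\n'.join(lines)


print(to_markdown([{
    'role': 'user',
    'content': [{'type': 'text', 'text': 'Describe this picture.'},
                {'type': 'image', 'image': 'cat.png'}],
}]))
```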
{evalscope-1.0.0 → evalscope-1.0.2}/evalscope/api/messages/__init__.py
RENAMED
@@ -6,6 +6,7 @@ from .chat_message import (
     ChatMessageUser,
     dict_to_chat_message,
     messages_pretty_str,
+    messages_to_markdown,
 )
 from .content import Content, ContentAudio, ContentData, ContentImage, ContentReasoning, ContentText, ContentVideo
 from .utils import parse_content_with_reasoning