evalscope 0.17.1__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/__init__.py +4 -1
- evalscope/api/benchmark/__init__.py +3 -0
- evalscope/api/benchmark/adapters/__init__.py +5 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +684 -0
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +156 -0
- evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
- evalscope/api/benchmark/benchmark.py +356 -0
- evalscope/api/benchmark/meta.py +121 -0
- evalscope/api/dataset/__init__.py +2 -0
- evalscope/api/dataset/dataset.py +349 -0
- evalscope/api/dataset/loader.py +262 -0
- evalscope/api/dataset/utils.py +143 -0
- evalscope/api/evaluator/__init__.py +3 -0
- evalscope/api/evaluator/cache.py +378 -0
- evalscope/api/evaluator/evaluator.py +56 -0
- evalscope/api/evaluator/state.py +275 -0
- evalscope/api/filter/__init__.py +1 -0
- evalscope/api/filter/filter.py +72 -0
- evalscope/api/messages/__init__.py +12 -0
- evalscope/api/messages/chat_message.py +243 -0
- evalscope/api/messages/content.py +102 -0
- evalscope/api/messages/utils.py +35 -0
- evalscope/api/metric/__init__.py +2 -0
- evalscope/api/metric/metric.py +55 -0
- evalscope/api/metric/scorer.py +113 -0
- evalscope/api/mixin/__init__.py +1 -0
- evalscope/api/mixin/llm_judge_mixin.py +168 -0
- evalscope/api/model/__init__.py +12 -0
- evalscope/api/model/generate_config.py +155 -0
- evalscope/api/model/model.py +386 -0
- evalscope/api/model/model_output.py +285 -0
- evalscope/api/registry.py +182 -0
- evalscope/api/tool/__init__.py +3 -0
- evalscope/api/tool/tool_call.py +101 -0
- evalscope/api/tool/tool_info.py +173 -0
- evalscope/api/tool/utils.py +64 -0
- evalscope/app/app.py +3 -0
- evalscope/app/ui/app_ui.py +2 -1
- evalscope/app/ui/multi_model.py +50 -25
- evalscope/app/ui/single_model.py +26 -14
- evalscope/app/utils/data_utils.py +43 -27
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/text_utils.py +14 -14
- evalscope/app/utils/visualization.py +9 -4
- evalscope/arguments.py +7 -10
- evalscope/backend/opencompass/api_meta_template.py +2 -1
- evalscope/backend/opencompass/backend_manager.py +6 -5
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
- evalscope/backend/rag_eval/utils/embedding.py +10 -1
- evalscope/backend/rag_eval/utils/llm.py +13 -12
- evalscope/benchmarks/__init__.py +0 -2
- evalscope/benchmarks/aime/aime24_adapter.py +38 -40
- evalscope/benchmarks/aime/aime25_adapter.py +34 -40
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +86 -60
- evalscope/benchmarks/arc/arc_adapter.py +34 -147
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70
- evalscope/benchmarks/arena_hard/utils.py +37 -1
- evalscope/benchmarks/bbh/bbh_adapter.py +72 -144
- evalscope/benchmarks/bfcl/bfcl_adapter.py +188 -171
- evalscope/benchmarks/bfcl/generation.py +222 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +93 -162
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -125
- evalscope/benchmarks/competition_math/competition_math_adapter.py +56 -108
- evalscope/benchmarks/data_collection/data_collection_adapter.py +187 -45
- evalscope/benchmarks/docmath/docmath_adapter.py +109 -51
- evalscope/benchmarks/docmath/utils.py +4 -5
- evalscope/benchmarks/drop/drop_adapter.py +88 -40
- evalscope/benchmarks/frames/frames_adapter.py +136 -52
- evalscope/benchmarks/general_arena/general_arena_adapter.py +140 -98
- evalscope/benchmarks/general_arena/utils.py +23 -27
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +40 -101
- evalscope/benchmarks/general_qa/general_qa_adapter.py +73 -134
- evalscope/benchmarks/gpqa/gpqa_adapter.py +61 -100
- evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py} +12 -5
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +62 -142
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +35 -124
- evalscope/benchmarks/hle/hle_adapter.py +127 -93
- evalscope/benchmarks/humaneval/humaneval_adapter.py +86 -55
- evalscope/benchmarks/ifeval/ifeval_adapter.py +69 -40
- evalscope/benchmarks/ifeval/instructions.py +109 -64
- evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -65
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +121 -71
- evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
- evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
- evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +49 -75
- evalscope/benchmarks/math_500/math_500_adapter.py +41 -48
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
- evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -205
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +80 -99
- evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +64 -110
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
- evalscope/benchmarks/musr/musr_adapter.py +33 -64
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +196 -152
- evalscope/benchmarks/process_bench/process_bench_adapter.py +144 -76
- evalscope/benchmarks/race/race_adapter.py +33 -119
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
- evalscope/benchmarks/super_gpqa/{five_shot_prompt.txt → prompt.py} +14 -16
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +73 -117
- evalscope/benchmarks/super_gpqa/utils.py +2 -1
- evalscope/benchmarks/tau_bench/generation.py +147 -0
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +114 -60
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
- evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
- evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
- evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
- evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +91 -70
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -124
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -266
- evalscope/benchmarks/winogrande/winogrande_adapter.py +28 -54
- evalscope/cli/cli.py +2 -0
- evalscope/cli/start_app.py +7 -1
- evalscope/cli/start_perf.py +7 -1
- evalscope/cli/start_server.py +6 -3
- evalscope/collections/__init__.py +2 -10
- evalscope/collections/sampler.py +10 -10
- evalscope/collections/schema.py +13 -11
- evalscope/config.py +157 -57
- evalscope/constants.py +37 -61
- evalscope/evaluator/__init__.py +1 -1
- evalscope/evaluator/evaluator.py +275 -419
- evalscope/filters/__init__.py +2 -0
- evalscope/filters/extraction.py +126 -0
- evalscope/filters/selection.py +57 -0
- evalscope/metrics/__init__.py +13 -13
- evalscope/metrics/llm_judge.py +47 -33
- evalscope/metrics/math_parser.py +27 -22
- evalscope/metrics/metric.py +307 -0
- evalscope/metrics/metrics.py +22 -18
- evalscope/metrics/t2v_metrics/__init__.py +0 -52
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
- evalscope/models/__init__.py +6 -29
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/mockllm.py +65 -0
- evalscope/models/model_apis.py +67 -0
- evalscope/models/modelscope.py +455 -0
- evalscope/models/openai_compatible.py +126 -0
- evalscope/models/text2image_model.py +124 -0
- evalscope/models/utils/openai.py +701 -0
- evalscope/perf/benchmark.py +4 -1
- evalscope/perf/http_client.py +4 -2
- evalscope/perf/plugin/api/custom_api.py +5 -4
- evalscope/perf/plugin/api/openai_api.py +11 -9
- evalscope/perf/plugin/datasets/custom.py +2 -1
- evalscope/perf/plugin/datasets/flickr8k.py +1 -1
- evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
- evalscope/perf/plugin/datasets/line_by_line.py +2 -1
- evalscope/perf/plugin/datasets/longalpaca.py +2 -1
- evalscope/perf/plugin/datasets/openqa.py +4 -2
- evalscope/perf/utils/benchmark_util.py +15 -10
- evalscope/perf/utils/db_util.py +9 -6
- evalscope/perf/utils/local_server.py +11 -3
- evalscope/perf/utils/rich_display.py +16 -10
- evalscope/report/__init__.py +2 -3
- evalscope/report/combinator.py +18 -12
- evalscope/report/generator.py +51 -35
- evalscope/report/{utils.py → report.py} +8 -6
- evalscope/run.py +33 -47
- evalscope/summarizer.py +1 -1
- evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- evalscope/utils/__init__.py +21 -2
- evalscope/utils/chat_service.py +3 -2
- evalscope/utils/deprecation_utils.py +12 -1
- evalscope/utils/function_utils.py +29 -0
- evalscope/utils/import_utils.py +23 -1
- evalscope/utils/io_utils.py +142 -6
- evalscope/utils/json_schema.py +208 -0
- evalscope/utils/logger.py +51 -12
- evalscope/utils/model_utils.py +11 -7
- evalscope/utils/multi_choices.py +288 -0
- evalscope/utils/url_utils.py +65 -0
- evalscope/version.py +2 -2
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/METADATA +108 -62
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/RECORD +258 -226
- tests/benchmark/test_eval.py +385 -0
- tests/benchmark/test_image_edit.py +65 -0
- tests/{aigc → benchmark}/test_t2i.py +22 -4
- tests/benchmark/test_vlm.py +80 -0
- tests/cli/test_all.py +85 -47
- tests/cli/test_collection.py +20 -8
- tests/cli/test_custom.py +22 -15
- tests/cli/test_reasoning.py +81 -0
- tests/common.py +73 -0
- tests/perf/test_perf.py +4 -2
- tests/rag/test_clip_benchmark.py +0 -2
- evalscope/benchmarks/aigc/t2i/base.py +0 -56
- evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +0 -78
- evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +0 -58
- evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +0 -58
- evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +0 -57
- evalscope/benchmarks/aigc/t2i/tifa_adapter.py +0 -37
- evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope/benchmarks/benchmark.py +0 -81
- evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope/benchmarks/data_adapter.py +0 -528
- evalscope/benchmarks/filters.py +0 -59
- evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope/benchmarks/process_bench/critique_template.txt +0 -13
- evalscope/benchmarks/race/race.py +0 -104
- evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
- evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope/benchmarks/utils.py +0 -60
- evalscope/collections/evaluator.py +0 -375
- evalscope/metrics/completion_parsers.py +0 -227
- evalscope/metrics/named_metrics.py +0 -55
- evalscope/models/adapters/__init__.py +0 -14
- evalscope/models/adapters/base_adapter.py +0 -84
- evalscope/models/adapters/bfcl_adapter.py +0 -246
- evalscope/models/adapters/chat_adapter.py +0 -207
- evalscope/models/adapters/choice_adapter.py +0 -222
- evalscope/models/adapters/custom_adapter.py +0 -71
- evalscope/models/adapters/server_adapter.py +0 -236
- evalscope/models/adapters/t2i_adapter.py +0 -79
- evalscope/models/adapters/tau_bench_adapter.py +0 -189
- evalscope/models/custom/__init__.py +0 -4
- evalscope/models/custom/custom_model.py +0 -50
- evalscope/models/custom/dummy_model.py +0 -99
- evalscope/models/local_model.py +0 -128
- evalscope/models/register.py +0 -41
- tests/cli/test_run.py +0 -489
- /evalscope/{benchmarks/aigc → api}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/t2i → image_edit}/__init__.py +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/LICENSE +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/WHEEL +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/entry_points.txt +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/top_level.txt +0 -0
- /tests/{aigc → benchmark}/__init__.py +0 -0
evalscope/backend/rag_eval/ragas/tasks/build_transform.py
CHANGED

@@ -44,8 +44,9 @@ def default_transforms(
         return bins
 
     def filter_doc_with_num_tokens(node, min_num_tokens=500):
-        return (
-            node.type == NodeType.DOCUMENT and num_tokens_from_string(node.properties['page_content']) > min_num_tokens)
+        return (
+            node.type == NodeType.DOCUMENT and num_tokens_from_string(node.properties['page_content']) > min_num_tokens
+        )
 
     def filter_docs(node):
         return node.type == NodeType.DOCUMENT
@@ -90,7 +91,8 @@ def default_transforms(
             target_lang=language,
             llm=llm,
             adapt_instruction=True,
-        ))
+        )
+    )
 
     transforms = [
         headline_extractor,
@@ -121,7 +123,8 @@ def default_transforms(
             target_lang=language,
             llm=llm,
             adapt_instruction=True,
-        ))
+        )
+    )
 
     transforms = [
         summary_extractor,
evalscope/backend/rag_eval/ragas/tasks/testset_generation.py
CHANGED

@@ -113,7 +113,8 @@ def generate_testset(args: TestsetGenerationArguments) -> None:
 
     # generate testset
     generator = TestsetGenerator(
-        llm=wrapped_llm, embedding_model=wrapped_embeddings, knowledge_graph=knowledge_graph, persona_list=persona_list)
+        llm=wrapped_llm, embedding_model=wrapped_embeddings, knowledge_graph=knowledge_graph, persona_list=persona_list
+    )
 
     testset = generator.generate(
         testset_size=args.test_size,
evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py
CHANGED

@@ -34,7 +34,8 @@ async def translate_prompt(
 
     logger.info(f'Translating prompts to {target_lang}')
     adapted_prompts = await prompt_user.adapt_prompts(
-        language=target_lang, llm=llm, adapt_instruction=adapt_instruction)
+        language=target_lang, llm=llm, adapt_instruction=adapt_instruction
+    )
     prompt_user.set_prompts(**adapted_prompts)
     try:
         prompt_user.save_prompts(prompt_dir)
evalscope/backend/rag_eval/utils/embedding.py
CHANGED

@@ -164,6 +164,13 @@ class CrossEncoderModel(BaseModel):
             max_length=self.max_seq_length,
             automodel_args=self.model_kwargs,
         )
+        self.tokenizer = self.model.tokenizer
+        # set pad token
+        if self.tokenizer.pad_token is None:
+            self.tokenizer.pad_token = self.tokenizer.eos_token
+        if ('pad_token_id' not in self.model.config) or (self.model.config.pad_token_id is None):
+            self.model.config.update({'pad_token_id': self.tokenizer.eos_token_id})
+
         self.supported_encode_params = get_supported_params(self.model.predict)
 
     def predict(self, sentences: List[List[str]], **kwargs) -> Tensor:
@@ -189,6 +196,7 @@ class APIEmbeddingModel(BaseModel):
         self.openai_api_base = kwargs.get('api_base')
         self.openai_api_key = kwargs.get('api_key')
         self.dimensions = kwargs.get('dimensions')
+        self.check_embedding_ctx_length = kwargs.get('check_embedding_ctx_length', False)
         self.framework = ['API']
 
         self.model = OpenAIEmbeddings(
@@ -196,7 +204,8 @@ class APIEmbeddingModel(BaseModel):
             openai_api_base=self.openai_api_base,
             openai_api_key=self.openai_api_key,
             dimensions=self.dimensions,
-            check_embedding_ctx_length=
+            check_embedding_ctx_length=self.check_embedding_ctx_length,
+        )
 
         super().__init__(model_name_or_path=self.model_name, **kwargs)
 
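The first hunk above guards against cross-encoder checkpoints that ship without a pad token. A minimal stand-alone sketch of the same fallback, written against plain `transformers` (the checkpoint name below is a placeholder, not something this diff prescribes):

```python
# Sketch of the pad-token fallback pattern added in the hunk above.
from transformers import AutoModelForSequenceClassification, AutoTokenizer

name = 'cross-encoder/ms-marco-MiniLM-L-6-v2'  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name)

# Some checkpoints define no pad token; reuse EOS so batched padding works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
if getattr(model.config, 'pad_token_id', None) is None:
    model.config.pad_token_id = tokenizer.eos_token_id
```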
evalscope/backend/rag_eval/utils/llm.py
CHANGED

@@ -2,11 +2,10 @@ import os
 from langchain_core.callbacks.manager import CallbackManagerForLLMRun
 from langchain_core.language_models.llms import LLM as BaseLLM
 from langchain_openai import ChatOpenAI
-from transformers.generation.configuration_utils import GenerationConfig
 from typing import Any, Dict, Iterator, List, Mapping, Optional
 
-from evalscope.
-from evalscope.
+from evalscope.api.model import GenerateConfig, Model, get_model
+from evalscope.constants import DEFAULT_MODEL_REVISION, EvalType
 
 
 class LLM:
@@ -30,16 +29,19 @@ class LocalLLM(BaseLLM):
     model_name_or_path: str
     model_revision: str = DEFAULT_MODEL_REVISION
     template_type: Optional[str] = None
-    model_name: Optional[str]
-    model: Optional[
-    generation_config: Optional[Dict]
+    model_name: Optional[str] = None
+    model: Optional[Model] = None
+    generation_config: Optional[Dict] = {}
 
     def __init__(self, **kw):
         super().__init__(**kw)
         self.model_name = os.path.basename(self.model_name_or_path)
-
-
-
+
+        # Create and initialize the local model
+        self.model = get_model(
+            model=self.model_name_or_path,
+            eval_type=EvalType.CHECKPOINT,
+            config=GenerateConfig(**self.generation_config),
         )
 
     def _call(
@@ -50,10 +52,9 @@ class LocalLLM(BaseLLM):
         **kwargs: Any,
     ) -> str:
         """Run the LLM on the given input."""
-        infer_cfg = {'stop': stop}
 
-        response
-        return response
+        response = self.model.generate(input=prompt)
+        return response.completion
 
     @property
     def _identifying_params(self) -> Dict[str, Any]:
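This hunk swaps the old direct-inference path for the new `evalscope.api.model` interface. A minimal sketch of that call pattern as it appears here (the model id is a placeholder, and no `GenerateConfig` field names are assumed beyond what the hunk shows):

```python
# Sketch only: mirrors the generate-and-read-completion pattern in the hunk above.
from evalscope.api.model import GenerateConfig, get_model
from evalscope.constants import EvalType

model = get_model(
    model='Qwen/Qwen2.5-0.5B-Instruct',  # placeholder checkpoint id/path
    eval_type=EvalType.CHECKPOINT,
    config=GenerateConfig(),
)
response = model.generate(input='Say hello in one word.')
print(response.completion)
```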
evalscope/benchmarks/__init__.py
CHANGED
evalscope/benchmarks/aime/aime24_adapter.py
CHANGED

@@ -1,5 +1,12 @@
-
-
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
 
 # flake8: noqa
@@ -7,46 +14,37 @@ from evalscope.utils.logger import get_logger
 logger = get_logger()
 
 
-@
-
-
-
-
-
-
-
-
-
-
-
-
+@register_benchmark(
+    BenchmarkMeta(
+        name='aime24',
+        pretty_name='AIME-2024',
+        tags=[Tags.MATH, Tags.REASONING],
+        description=
+        'The AIME 2024 benchmark is based on problems from the American Invitational Mathematics Examination, a prestigious high school mathematics competition. This benchmark tests a model\'s ability to solve challenging mathematics problems by generating step-by-step solutions and providing the correct final answer.',  # noqa: E501
+        dataset_id='HuggingFaceH4/aime_2024',
+        subset_list=['default'],
+        metric_list=[{
+            'acc': {
+                'numeric': True
+            }
+        }],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='train',  # Only train set is available
+        prompt_template='{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
+    )
 )
-class AIME24Adapter(
+class AIME24Adapter(DefaultDataAdapter):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-    def
-
-
-
-
-
-
-
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        # Extract the gold answer from the input dict.
-        return strip_answer_string(input_d['answer'])
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
-        """
-        Parse the model output to get the answer. Could be the best choice index.
-        """
-        # Note: Use same extraction method for both of checkpoint/service/custom
-        result = strip_answer_string(extract_answer(result))
-        return result
-
-    def match(self, gold: str, pred: str) -> float:
-        res = math_equal(pred, gold)
-        return 1.0 if res else 0.0
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        return Sample(
+            input=record['problem'],
+            target=record['answer'],
+            metadata={
+                'problem_id': record.get('id', ''),
+                'solution': record.get('solution', ''),
+            },
+        )
evalscope/benchmarks/aime/aime25_adapter.py
CHANGED

@@ -1,5 +1,12 @@
-
-
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
 
 # flake8: noqa
@@ -7,46 +14,33 @@ from evalscope.utils.logger import get_logger
 logger = get_logger()
 
 
-@
-
-
-
-
-
-
-
-
-
-
-
-
+@register_benchmark(
+    BenchmarkMeta(
+        name='aime25',
+        pretty_name='AIME-2025',
+        tags=[Tags.MATH, Tags.REASONING],
+        description=
+        'The AIME 2025 benchmark is based on problems from the American Invitational Mathematics Examination, a prestigious high school mathematics competition. This benchmark tests a model\'s ability to solve challenging mathematics problems by generating step-by-step solutions and providing the correct final answer.',
+        dataset_id='opencompass/AIME2025',
+        subset_list=['AIME2025-I', 'AIME2025-II'],
+        metric_list=[{
+            'acc': {
+                'numeric': True
+            }
+        }],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='test',
+        prompt_template='{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
+    )
 )
-class AIME25Adapter(
+class AIME25Adapter(DefaultDataAdapter):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-    def
-
-
-
-
-        full_prompt = self.prompt_template.format(query=problem)
-
-        return self.gen_prompt_data(full_prompt)
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        # Extract the gold answer from the input dict.
-        return strip_answer_string(input_d['answer'])
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
-        """
-        Parse the model output to get the answer. Could be the best choice index.
-        """
-        # Note: Use same extraction method for both of checkpoint/service/custom
-        result = strip_answer_string(extract_answer(result))
-        return result
-
-    def match(self, gold: str, pred: str) -> float:
-        res = math_equal(pred, gold)
-        return 1.0 if res else 0.0
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        return Sample(
+            input=record['question'],
+            target=record['answer'],
+        )
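The two AIME hunks show the 1.0 registration pattern: a `BenchmarkMeta` passed to `@register_benchmark`, plus a `DefaultDataAdapter` subclass whose `record_to_sample` maps raw records onto `Sample` objects. A hedged sketch of a user-defined benchmark following the same pattern (the benchmark name, dataset id, and record fields below are placeholders, not part of evalscope):

```python
# Sketch only: imitates the registration pattern visible in the AIME hunks above.
from typing import Any, Dict

from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
from evalscope.api.dataset import Sample
from evalscope.api.registry import register_benchmark
from evalscope.constants import Tags


@register_benchmark(
    BenchmarkMeta(
        name='my_math_bench',               # placeholder benchmark name
        pretty_name='My-Math-Bench',
        tags=[Tags.MATH],
        description='Toy benchmark illustrating the evalscope 1.0 adapter API.',
        dataset_id='my-org/my-math-bench',  # placeholder dataset id
        subset_list=['default'],
        metric_list=[{'acc': {'numeric': True}}],
        few_shot_num=0,
        train_split=None,
        eval_split='test',
        prompt_template='{question}\nPut your final answer within \\boxed{{}}.',
    )
)
class MyMathBenchAdapter(DefaultDataAdapter):

    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
        # Map one raw dataset record onto the framework's Sample type.
        return Sample(input=record['question'], target=record['answer'])
```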
evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py
CHANGED

@@ -1,16 +1,17 @@
 import re
-from
-
-
-from evalscope.
-from evalscope.
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.metric import Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
 
-# flake8: noqa
-
 logger = get_logger()
 
-GRADER_SYSTEM_PROMPT = """You are a highly efficient assistant, who evaluates and selects the best large language model (LLMs) based on the quality of their responses to a given instruction. This process will be used to create a leaderboard reflecting the most accurate and human-preferred answers."""
+GRADER_SYSTEM_PROMPT = """You are a highly efficient assistant, who evaluates and selects the best large language model (LLMs) based on the quality of their responses to a given instruction. This process will be used to create a leaderboard reflecting the most accurate and human-preferred answers."""  # noqa: E501
 
 GRADER_TEMPLATE = """
 I require a leaderboard for various large language models. I'll provide you with prompts given to these models and their corresponding outputs. Your task is to assess these responses, and select the model that produces the best output from a human perspective.
@@ -44,64 +45,89 @@ Evaluate the models based on the quality and relevance of their outputs, and sel
 """.strip()  # noqa: E501
 
 
-@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+@register_benchmark(
+    BenchmarkMeta(
+        name='alpaca_eval',
+        pretty_name='AlpacaEval2.0',
+        tags=[Tags.INSTRUCTION_FOLLOWING, Tags.ARENA],
+        description='Alpaca Eval 2.0 is an enhanced framework for evaluating instruction-following language models, '
+        'featuring an improved auto-annotator, updated baselines, and continuous preference calculation to '
+        'provide more accurate and cost-effective model assessments. '
+        'Currently not support `length-controlled winrate`; the official Judge model is `gpt-4-1106-preview`, while the baseline model is `gpt-4-turbo`.',  # noqa: E501
+        dataset_id='AI-ModelScope/alpaca_eval',
+        subset_list=['alpaca_eval_gpt4_baseline'],
+        metric_list=['winrate'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='eval',
+        prompt_template='{question}'
+    )
+)
+class AlpacaEvalAdapter(DefaultDataAdapter):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-        #
-
-
-
-
-
-
-
-
-
-
-
+        self._use_llm_judge = True  # Use LLM as a judge by default
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        """
+        Convert a data record to a Sample object.
+
+        Args:
+            record (Dict[str, Any]): Input data record.
+
+        Returns:
+            Sample: Sample object with input, target, and metadata.
+        """
+        instruction = record['instruction']
+        baseline_output = record['output']  # baseline model output
+
+        return Sample(
+            input=instruction,
+            target=baseline_output,
+            metadata={
+                'generator': record.get('generator', 'unknown'),
+                'dataset': record.get('dataset', 'unknown')
+            }
+        )
+
+    def llm_match_score(
+        self,
+        original_prediction: str,
+        filtered_prediction: str,
+        reference: str,
+        task_state: TaskState,
+    ) -> Score:
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+
+        instruction = task_state.input_text
+
+        # Request judge and obtain score
+        # reference is baseline answer 'm', filtered_prediction is model answer 'M'
+        prompt = GRADER_TEMPLATE.format(instruction=instruction, output_1=reference, output_2=filtered_prediction)
+        judge_response = self.llm_judge.judge(prompt, system_prompt=GRADER_SYSTEM_PROMPT)
 
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, **kwargs) -> str:
-        return result.strip()
-
-    def match(self, gold: str, pred: str):
-        # simple match
-        logger.warning(f'Please use LLMJudge to match the result for {self.name}')
-        return None
-
-    def llm_match(self, gold: Any, pred: Any, judge: LLMJudge, **kwargs) -> bool:
-        raw_input = kwargs.get('raw_input', None)
-        instruction = raw_input['instruction']
-        # gold is baseline answer 'm', pred is model answer 'M'
-        prompt = GRADER_TEMPLATE.format(instruction=instruction, output_1=gold, output_2=pred)
-        # get grading response
-        grading_response = judge(prompt, system_prompt=GRADER_SYSTEM_PROMPT)
         # parse grading response
-        match = re.search(r'(m|M)',
+        match = re.search(r'(m|M)', judge_response)
         res = match.group(0) if match else None
+
         if res:
-
+            winrate = 1 if res == 'M' else 0
         else:
-            logger.info(f'Failed to parse grading response: {prompt=}\n {
-
-
-
-
-
-
-
+            logger.info(f'Failed to parse grading response: {prompt=}\n {judge_response=}')
+            winrate = 0
+
+        # Set score based on the match result
+        score.value = {'winrate': winrate}
+        score.explanation = f'LLM judge: {judge_response}'
+        score.metadata = {
+            'source': 'llm_judge',
+            'judge_strategy': self.judge_strategy,
+            'model': self.llm_judge.model_id
+        }
+        score.main_score_name = 'winrate'
+        return score
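The grading step in `llm_match_score` reduces to a one-character verdict: per the comments in the hunk, the judge answers `m` (baseline wins) or `M` (candidate model wins). A self-contained sketch of that parse (the helper name is mine, not part of evalscope):

```python
import re


def parse_winrate(judge_response: str) -> int:
    """Return 1 if the judge picked the candidate model ('M'), else 0."""
    match = re.search(r'(m|M)', judge_response)
    return 1 if match and match.group(0) == 'M' else 0


assert parse_winrate('M') == 1            # candidate model preferred
assert parse_winrate('m') == 0            # baseline preferred
assert parse_winrate('no verdict') == 0   # unparseable response counts as a loss
```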