evalscope 0.17.1__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of evalscope might be problematic.
- evalscope/__init__.py +4 -1
- evalscope/api/benchmark/__init__.py +3 -0
- evalscope/api/benchmark/adapters/__init__.py +5 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +684 -0
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +156 -0
- evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
- evalscope/api/benchmark/benchmark.py +356 -0
- evalscope/api/benchmark/meta.py +121 -0
- evalscope/api/dataset/__init__.py +2 -0
- evalscope/api/dataset/dataset.py +349 -0
- evalscope/api/dataset/loader.py +262 -0
- evalscope/api/dataset/utils.py +143 -0
- evalscope/api/evaluator/__init__.py +3 -0
- evalscope/api/evaluator/cache.py +378 -0
- evalscope/api/evaluator/evaluator.py +56 -0
- evalscope/api/evaluator/state.py +275 -0
- evalscope/api/filter/__init__.py +1 -0
- evalscope/api/filter/filter.py +72 -0
- evalscope/api/messages/__init__.py +12 -0
- evalscope/api/messages/chat_message.py +243 -0
- evalscope/api/messages/content.py +102 -0
- evalscope/api/messages/utils.py +35 -0
- evalscope/api/metric/__init__.py +2 -0
- evalscope/api/metric/metric.py +55 -0
- evalscope/api/metric/scorer.py +113 -0
- evalscope/api/mixin/__init__.py +1 -0
- evalscope/api/mixin/llm_judge_mixin.py +168 -0
- evalscope/api/model/__init__.py +12 -0
- evalscope/api/model/generate_config.py +155 -0
- evalscope/api/model/model.py +386 -0
- evalscope/api/model/model_output.py +285 -0
- evalscope/api/registry.py +182 -0
- evalscope/api/tool/__init__.py +3 -0
- evalscope/api/tool/tool_call.py +101 -0
- evalscope/api/tool/tool_info.py +173 -0
- evalscope/api/tool/utils.py +64 -0
- evalscope/app/app.py +3 -0
- evalscope/app/ui/app_ui.py +2 -1
- evalscope/app/ui/multi_model.py +50 -25
- evalscope/app/ui/single_model.py +26 -14
- evalscope/app/utils/data_utils.py +43 -27
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/text_utils.py +14 -14
- evalscope/app/utils/visualization.py +9 -4
- evalscope/arguments.py +7 -10
- evalscope/backend/opencompass/api_meta_template.py +2 -1
- evalscope/backend/opencompass/backend_manager.py +6 -5
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
- evalscope/backend/rag_eval/utils/embedding.py +10 -1
- evalscope/backend/rag_eval/utils/llm.py +13 -12
- evalscope/benchmarks/__init__.py +0 -2
- evalscope/benchmarks/aime/aime24_adapter.py +38 -40
- evalscope/benchmarks/aime/aime25_adapter.py +34 -40
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +86 -60
- evalscope/benchmarks/arc/arc_adapter.py +34 -147
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70
- evalscope/benchmarks/arena_hard/utils.py +37 -1
- evalscope/benchmarks/bbh/bbh_adapter.py +72 -144
- evalscope/benchmarks/bfcl/bfcl_adapter.py +188 -171
- evalscope/benchmarks/bfcl/generation.py +222 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +93 -162
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -125
- evalscope/benchmarks/competition_math/competition_math_adapter.py +56 -108
- evalscope/benchmarks/data_collection/data_collection_adapter.py +187 -45
- evalscope/benchmarks/docmath/docmath_adapter.py +109 -51
- evalscope/benchmarks/docmath/utils.py +4 -5
- evalscope/benchmarks/drop/drop_adapter.py +88 -40
- evalscope/benchmarks/frames/frames_adapter.py +136 -52
- evalscope/benchmarks/general_arena/general_arena_adapter.py +140 -98
- evalscope/benchmarks/general_arena/utils.py +23 -27
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +40 -101
- evalscope/benchmarks/general_qa/general_qa_adapter.py +73 -134
- evalscope/benchmarks/gpqa/gpqa_adapter.py +61 -100
- evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py} +12 -5
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +62 -142
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +35 -124
- evalscope/benchmarks/hle/hle_adapter.py +127 -93
- evalscope/benchmarks/humaneval/humaneval_adapter.py +86 -55
- evalscope/benchmarks/ifeval/ifeval_adapter.py +69 -40
- evalscope/benchmarks/ifeval/instructions.py +109 -64
- evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -65
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +121 -71
- evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
- evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
- evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +49 -75
- evalscope/benchmarks/math_500/math_500_adapter.py +41 -48
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
- evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -205
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +80 -99
- evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +64 -110
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
- evalscope/benchmarks/musr/musr_adapter.py +33 -64
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +196 -152
- evalscope/benchmarks/process_bench/process_bench_adapter.py +144 -76
- evalscope/benchmarks/race/race_adapter.py +33 -119
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
- evalscope/benchmarks/super_gpqa/{five_shot_prompt.txt → prompt.py} +14 -16
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +73 -117
- evalscope/benchmarks/super_gpqa/utils.py +2 -1
- evalscope/benchmarks/tau_bench/generation.py +147 -0
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +114 -60
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
- evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
- evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
- evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
- evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +91 -70
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -124
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -266
- evalscope/benchmarks/winogrande/winogrande_adapter.py +28 -54
- evalscope/cli/cli.py +2 -0
- evalscope/cli/start_app.py +7 -1
- evalscope/cli/start_perf.py +7 -1
- evalscope/cli/start_server.py +6 -3
- evalscope/collections/__init__.py +2 -10
- evalscope/collections/sampler.py +10 -10
- evalscope/collections/schema.py +13 -11
- evalscope/config.py +157 -57
- evalscope/constants.py +37 -61
- evalscope/evaluator/__init__.py +1 -1
- evalscope/evaluator/evaluator.py +275 -419
- evalscope/filters/__init__.py +2 -0
- evalscope/filters/extraction.py +126 -0
- evalscope/filters/selection.py +57 -0
- evalscope/metrics/__init__.py +13 -13
- evalscope/metrics/llm_judge.py +47 -33
- evalscope/metrics/math_parser.py +27 -22
- evalscope/metrics/metric.py +307 -0
- evalscope/metrics/metrics.py +22 -18
- evalscope/metrics/t2v_metrics/__init__.py +0 -52
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
- evalscope/models/__init__.py +6 -29
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/mockllm.py +65 -0
- evalscope/models/model_apis.py +67 -0
- evalscope/models/modelscope.py +455 -0
- evalscope/models/openai_compatible.py +126 -0
- evalscope/models/text2image_model.py +124 -0
- evalscope/models/utils/openai.py +701 -0
- evalscope/perf/benchmark.py +4 -1
- evalscope/perf/http_client.py +4 -2
- evalscope/perf/plugin/api/custom_api.py +5 -4
- evalscope/perf/plugin/api/openai_api.py +11 -9
- evalscope/perf/plugin/datasets/custom.py +2 -1
- evalscope/perf/plugin/datasets/flickr8k.py +1 -1
- evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
- evalscope/perf/plugin/datasets/line_by_line.py +2 -1
- evalscope/perf/plugin/datasets/longalpaca.py +2 -1
- evalscope/perf/plugin/datasets/openqa.py +4 -2
- evalscope/perf/utils/benchmark_util.py +15 -10
- evalscope/perf/utils/db_util.py +9 -6
- evalscope/perf/utils/local_server.py +11 -3
- evalscope/perf/utils/rich_display.py +16 -10
- evalscope/report/__init__.py +2 -3
- evalscope/report/combinator.py +18 -12
- evalscope/report/generator.py +51 -35
- evalscope/report/{utils.py → report.py} +8 -6
- evalscope/run.py +33 -47
- evalscope/summarizer.py +1 -1
- evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- evalscope/utils/__init__.py +21 -2
- evalscope/utils/chat_service.py +3 -2
- evalscope/utils/deprecation_utils.py +12 -1
- evalscope/utils/function_utils.py +29 -0
- evalscope/utils/import_utils.py +23 -1
- evalscope/utils/io_utils.py +142 -6
- evalscope/utils/json_schema.py +208 -0
- evalscope/utils/logger.py +51 -12
- evalscope/utils/model_utils.py +11 -7
- evalscope/utils/multi_choices.py +288 -0
- evalscope/utils/url_utils.py +65 -0
- evalscope/version.py +2 -2
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/METADATA +108 -62
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/RECORD +258 -226
- tests/benchmark/test_eval.py +385 -0
- tests/benchmark/test_image_edit.py +65 -0
- tests/{aigc → benchmark}/test_t2i.py +22 -4
- tests/benchmark/test_vlm.py +80 -0
- tests/cli/test_all.py +85 -47
- tests/cli/test_collection.py +20 -8
- tests/cli/test_custom.py +22 -15
- tests/cli/test_reasoning.py +81 -0
- tests/common.py +73 -0
- tests/perf/test_perf.py +4 -2
- tests/rag/test_clip_benchmark.py +0 -2
- evalscope/benchmarks/aigc/t2i/base.py +0 -56
- evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +0 -78
- evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +0 -58
- evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +0 -58
- evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +0 -57
- evalscope/benchmarks/aigc/t2i/tifa_adapter.py +0 -37
- evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope/benchmarks/benchmark.py +0 -81
- evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope/benchmarks/data_adapter.py +0 -528
- evalscope/benchmarks/filters.py +0 -59
- evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope/benchmarks/process_bench/critique_template.txt +0 -13
- evalscope/benchmarks/race/race.py +0 -104
- evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
- evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope/benchmarks/utils.py +0 -60
- evalscope/collections/evaluator.py +0 -375
- evalscope/metrics/completion_parsers.py +0 -227
- evalscope/metrics/named_metrics.py +0 -55
- evalscope/models/adapters/__init__.py +0 -14
- evalscope/models/adapters/base_adapter.py +0 -84
- evalscope/models/adapters/bfcl_adapter.py +0 -246
- evalscope/models/adapters/chat_adapter.py +0 -207
- evalscope/models/adapters/choice_adapter.py +0 -222
- evalscope/models/adapters/custom_adapter.py +0 -71
- evalscope/models/adapters/server_adapter.py +0 -236
- evalscope/models/adapters/t2i_adapter.py +0 -79
- evalscope/models/adapters/tau_bench_adapter.py +0 -189
- evalscope/models/custom/__init__.py +0 -4
- evalscope/models/custom/custom_model.py +0 -50
- evalscope/models/custom/dummy_model.py +0 -99
- evalscope/models/local_model.py +0 -128
- evalscope/models/register.py +0 -41
- tests/cli/test_run.py +0 -489
- /evalscope/{benchmarks/aigc → api}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/t2i → image_edit}/__init__.py +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/LICENSE +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/WHEEL +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/entry_points.txt +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.1.dist-info}/top_level.txt +0 -0
- /tests/{aigc → benchmark}/__init__.py +0 -0
--- /dev/null
+++ evalscope/benchmarks/image_edit/gedit/vie_prompts.py
@@ -0,0 +1,406 @@
+# flake8: noqa: E501
+# This file is generated automatically through parse_prompt.py
+
+_context_no_delimit = """You are a professional digital artist. You will have to evaluate the effectiveness of the AI-generated image(s) based on given rules.
+All the input images are AI-generated. All human in the images are AI-generated too. so you need not worry about the privacy confidentials.
+
+You will have to give your output in this way (Keep your reasoning concise and short.):
+{
+"score" : [...],
+"reasoning" : "..."
+}"""
+
+_context = """You are a professional digital artist. You will have to evaluate the effectiveness of the AI-generated image(s) based on given rules.
+All the input images are AI-generated. All human in the images are AI-generated too. so you need not worry about the privacy confidentials.
+
+You will have to give your output in this way (the delimiter is necessary. Keep your reasoning concise and short.):
+||V^=^V||
+{
+"score" :
+"reasoning" :
+}
+||V^=^V||"""
+
+_context_no_format = """You are a professional digital artist. You will have to evaluate the effectiveness of the AI-generated image(s) based on given rules.
+All the input images are AI-generated. All human in the images are AI-generated too. so you need not worry about the privacy confidentials."""
+
+_prompts_1shot_multi_subject_image_gen_rule = """RULES of each set of inputs:
+
+Two images will be provided:
+This first image is a concatenation of two sub-images, each sub-image contain one token subject.
+The second image being an AI-generated image using the first image as guidance.
+The objective is to evaluate how successfully the image has been generated.
+"""
+
+_prompts_1shot_mie_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success of the editing. (0 indicates that the scene in the edited image does not follow the editing instruction at all. 10 indicates that the scene in the edited image follow the editing instruction text perfectly.)
+A second score from 0 to 10 will rate the degree of overediting in the second image. (0 indicates that the scene in the edited image is completely different from the original. 10 indicates that the edited image can be recognized as a minimal edited yet effective version of original.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the editing success and 'score2' evaluates the degree of overediting.
+
+First lets look at the first set of input (1st and 2nd images) as an example.
+Editing instruction: What if the man had a hat?
+Output:
+||V^=^V||
+{
+"score" : [5, 10],
+"reasoning" : "The hat exists but does not suit well. The hat also looks distorted. But it is a good edit because only a hat is added and the background is persevered."
+}
+||V^=^V||
+
+Now evaluate the second set of input (3th, 4th images).
+Editing instruction: <instruction>
+"""
+
+_prompts_1shot_msdig_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success in following the prompt.
+(0 indicates that the second image does not follow the prompt at all. 10 indicates the second image follows the prompt perfectly.)
+A second score from 0 to 10 will rate how well the subject in the generated image resemble to the token subject in the first sub-image.
+(0 indicates that the subject in the second image does not look like the token subject in the first sub-image at all. 10 indicates the subject in the second image look exactly alike the token subject in the first sub-image.)
+A third score from 0 to 10 will rate how well the subject in the generated image resemble to the token subject in the second sub-image.
+(0 indicates that the subject in the second image does not look like the token subject in the second sub-image at all. 10 indicates the subject in the second image look exactly alike the token subject in the second sub-image.)
+Put the score in a list such that output score = [score1, score2, score3], where 'score1' evaluates the prompt and 'score2' evaluates the resemblance for the first sub-image, and 'score3' evaluates the resemblance for the second sub-image.
+
+First lets look at the first set of input (1st and 2nd images) as an example.
+Text Prompt: A digital illustration of a cat beside a wooden pot
+Output:
+||V^=^V||
+{
+"score" : [5, 5, 10],
+"reasoning" : "The cat is not beside the wooden pot. The pot looks partially resemble to the subject pot. The cat looks highly resemble to the subject cat."
+}
+||V^=^V||
+
+Now evaluate the second set of input (3th, 4th images).
+Text Prompt: <prompt>"""
+
+_prompts_1shot_t2i_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success in following the prompt.
+(0 indicates that the AI generated image does not follow the prompt at all. 10 indicates the AI generated image follows the prompt perfectly.)
+
+Put the score in a list such that output score = [score].
+
+First lets look at the first set of input (1st image) as an example.
+Text Prompt: A pink and a white frisbee are on the ground.
+Output:
+||V^=^V||
+{
+"score" : [5],
+"reasoning" : "White frisbee not present in the image."
+}
+||V^=^V||
+
+Now evaluate the second set of input (2nd image).
+Text Prompt: <prompt>
+"""
+
+_prompts_1shot_tie_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success of the editing. (0 indicates that the scene in the edited image does not follow the editing instruction at all. 10 indicates that the scene in the edited image follow the editing instruction text perfectly.)
+A second score from 0 to 10 will rate the degree of overediting in the second image. (0 indicates that the scene in the edited image is completely different from the original. 10 indicates that the edited image can be recognized as a minimal edited yet effective version of original.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the editing success and 'score2' evaluates the degree of overediting.
+
+First lets look at the first set of input (1st and 2nd images) as an example.
+Editing instruction: What if the man had a hat?
+Output:
+||V^=^V||
+{
+"score" : [5, 10],
+"reasoning" : "The hat exists but does not suit well. The hat also looks distorted. But it is a good edit because only a hat is added and the background is persevered."
+}
+||V^=^V||
+
+Now evaluate the second set of input (3th, 4th images).
+Editing instruction: <instruction>
+"""
+
+_prompts_1shot_sdie_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will rate how well the subject in the generated image resemble to the token subject in the second image.
+(0 indicates that the subject in the third image does not look like the token subject at all. 10 indicates the subject in the third image look exactly alike the token subject.)
+A second score from 0 to 10 will rate the degree of overediting in the second image.
+(0 indicates that the scene in the edited image is completely different from the first image. 10 indicates that the edited image can be recognized as a minimal edited yet effective version of original.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the resemblance and 'score2' evaluates the degree of overediting.
+
+First lets look at the first set of input (1st, 2nd and 3rd images) as an example.
+Subject: <subject>
+Output:
+||V^=^V||
+{
+"score" : [5, 10],
+"reasoning" : "The monster toy looks partially resemble to the token subject. The edit is minimal."
+}
+||V^=^V||
+
+Now evaluate the second set of input (4th, 5th, and 6th images).
+Subject: <subject>
+"""
+
+_prompts_1shot_one_image_gen_rule = """RULES of each set of inputs:
+
+One image will be provided; The image is an AI-generated image.
+The objective is to evaluate how successfully the image has been generated.
+"""
+
+_prompts_1shot_sdig_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success in following the prompt.
+(0 indicates that the second image does not follow the prompt at all. 10 indicates the second image follows the prompt perfectly.)
+A second score from 0 to 10 will rate how well the subject in the generated image resemble to the token subject in the first image.
+(0 indicates that the subject in the second image does not look like the token subject at all. 10 indicates the subject in the second image look exactly alike the token subject.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the prompt and 'score2' evaluates the resemblance.
+
+First lets look at the first set of input (1st and 2nd images) as an example.
+Text Prompt: a red cartoon figure eating a banana
+Output:
+||V^=^V||
+{
+"score" : [10, 5],
+"reasoning" : "The red cartoon figure is eating a banana. The red cartoon figure looks partially resemble to the subject."
+}
+||V^=^V||
+
+Now evaluate the second set of input (3th, 4th images).
+Text Prompt: <prompt>
+"""
+
+_prompts_1shot_rule_PQ = """RULES of each set of inputs:
+
+One image will be provided; The image is an AI-generated image.
+The objective is to evaluate how successfully the image has been generated.
+
+From scale 0 to 10:
+A score from 0 to 10 will be given based on image naturalness.
+(
+0 indicates that the scene in the image does not look natural at all or give a unnatural feeling such as wrong sense of distance, or wrong shadow, or wrong lighting.
+10 indicates that the image looks natural.
+)
+A second score from 0 to 10 will rate the image artifacts.
+(
+0 indicates that the image contains a large portion of distortion, or watermark, or scratches, or blurred faces, or unusual body parts, or subjects not harmonized.
+10 indicates the image has no artifacts.
+)
+Put the score in a list such that output score = [naturalness, artifacts]
+
+
+First lets look at the first set of input (1st image) as an example.
+Output:
+||V^=^V||
+{
+"score" : [5, 5],
+"reasoning" : "The image gives an unnatural feeling on hands of the girl. There is also minor distortion on the eyes of the girl."
+}
+||V^=^V||
+
+Now evaluate the second set of input (2nd image).
+
+"""
+
+_prompts_1shot_subject_image_gen_rule = """RULES of each set of inputs:
+
+Two images will be provided: The first being a token subject image and the second being an AI-generated image using the first image as guidance.
+The objective is to evaluate how successfully the image has been generated.
+"""
+
+_prompts_1shot_cig_rule_SC = """
+From scale 0 to 10:
+A score from 0 to 10 will be given based on the success in following the prompt.
+(0 indicates that the second image does not follow the prompt at all. 10 indicates the second image follows the prompt perfectly.)
+A second score from 0 to 10 will rate how well the generated image is following the guidance image.
+(0 indicates that the second image is not following the guidance at all. 10 indicates that second image is following the guidance image.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the prompt and 'score2' evaluates the guidance.
+
+First lets look at the first set of input (1st and 2nd images) as an example.
+Text Prompt: the bridge is red, Golden Gate Bridge in San Francisco, USA
+Output:
+||V^=^V||
+{
+"score" : [5, 5],
+"reasoning" : "The bridge is red. But half of the bridge is gone."
+}
+||V^=^V||
+
+Now evaluate the second set of input (3th, 4th images).
+Text Prompt: <prompt>
+"""
+
+_prompts_1shot_two_image_edit_rule = """RULES of each set of inputs:
+
+Two images will be provided: The first being the original AI-generated image and the second being an edited version of the first.
+The objective is to evaluate how successfully the editing instruction has been executed in the second image.
+
+Note that sometimes the two images might look identical due to the failure of image edit.
+"""
+
+_prompts_1shot_subject_image_edit_rule = """RULES of each set of inputs:
+
+Three images will be provided:
+The first image is a input image to be edited.
+The second image is a token subject image.
+The third image is an AI-edited image from the first image. it should contain a subject that looks alike the subject in second image.
+The objective is to evaluate how successfully the image has been edited.
+"""
+
+_prompts_1shot_control_image_gen_rule = """RULES of each set of inputs:
+
+Two images will be provided: The first being a processed image (e.g. Canny edges, openpose, grayscale etc.) and the second being an AI-generated image using the first image as guidance.
+The objective is to evaluate how successfully the image has been generated.
+"""
+
+_prompts_0shot_two_image_edit_rule = """RULES:
+
+Two images will be provided: The first being the original AI-generated image and the second being an edited version of the first.
+The objective is to evaluate how successfully the editing instruction has been executed in the second image.
+
+Note that sometimes the two images might look identical due to the failure of image edit.
+"""
+
+_prompts_0shot_one_video_gen_rule = """RULES:
+
+The images are extracted from a AI-generated video according to the text prompt.
+The objective is to evaluate how successfully the video has been generated.
+"""
+
+_prompts_0shot_t2v_rule_PQ = """RULES:
+
+The image frames are AI-generated.
+The objective is to evaluate how successfully the image frames has been generated.
+
+From scale 0 to 10:
+A score from 0 to 10 will be given based on the image frames naturalness.
+(
+0 indicates that the scene in the image frames does not look natural at all or give a unnatural feeling such as wrong sense of distance, or wrong shadow, or wrong lighting.
+10 indicates that the image frames looks natural.
+)
+A second score from 0 to 10 will rate the image frames artifacts.
+(
+0 indicates that the image frames contains a large portion of distortion, or watermark, or scratches, or blurred faces, or unusual body parts, or subjects not harmonized.
+10 indicates the image frames has no artifacts.
+)
+Put the score in a list such that output score = [naturalness, artifacts]
+"""
+
+_prompts_0shot_msdig_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success in following the prompt.
+(0 indicates that the second image does not follow the prompt at all. 10 indicates the second image follows the prompt perfectly.)
+A second score from 0 to 10 will rate how well the subject in the generated image resemble to the token subject in the first sub-image.
+(0 indicates that the subject in the second image does not look like the token subject in the first sub-image at all. 10 indicates the subject in the second image look exactly alike the token subject in the first sub-image.)
+A third score from 0 to 10 will rate how well the subject in the generated image resemble to the token subject in the second sub-image.
+(0 indicates that the subject in the second image does not look like the token subject in the second sub-image at all. 10 indicates the subject in the second image look exactly alike the token subject in the second sub-image.)
+Put the score in a list such that output score = [score1, score2, score3], where 'score1' evaluates the prompt and 'score2' evaluates the resemblance for the first sub-image, and 'score3' evaluates the resemblance for the second sub-image.
+
+Text Prompt: <prompt>
+"""
+
+_prompts_0shot_sdie_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will rate how well the subject in the generated image resemble to the token subject in the second image.
+(0 indicates that the subject in the third image does not look like the token subject at all. 10 indicates the subject in the third image look exactly alike the token subject.)
+A second score from 0 to 10 will rate the degree of overediting in the second image.
+(0 indicates that the scene in the edited image is completely different from the first image. 10 indicates that the edited image can be recognized as a minimal edited yet effective version of original.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the resemblance and 'score2' evaluates the degree of overediting.
+
+Subject: <subject>"""
+
+_prompts_0shot_subject_image_edit_rule = """RULES:
+
+Three images will be provided:
+The first image is a input image to be edited.
+The second image is a token subject image.
+The third image is an AI-edited image from the first image. it should contain a subject that looks alike the subject in second image.
+The objective is to evaluate how successfully the image has been edited.
+"""
+
+_prompts_0shot_mie_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success of the editing. (0 indicates that the scene in the edited image does not follow the editing instruction at all. 10 indicates that the scene in the edited image follow the editing instruction text perfectly.)
+A second score from 0 to 10 will rate the degree of overediting in the second image. (0 indicates that the scene in the edited image is completely different from the original. 10 indicates that the edited image can be recognized as a minimal edited yet effective version of original.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the editing success and 'score2' evaluates the degree of overediting.
+
+Editing instruction: <instruction>
+"""
+
+_prompts_0shot_sdig_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success in following the prompt.
+(0 indicates that the second image does not follow the prompt at all. 10 indicates the second image follows the prompt perfectly.)
+A second score from 0 to 10 will rate how well the subject in the generated image resemble to the token subject in the first image.
+(0 indicates that the subject in the second image does not look like the token subject at all. 10 indicates the subject in the second image look exactly alike the token subject.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the prompt and 'score2' evaluates the resemblance.
+
+Text Prompt: <prompt>
+"""
+
+_prompts_0shot_tie_rule_SC = """
+From scale 0 to 10:
+A score from 0 to 10 will be given based on the success of the editing. (0 indicates that the scene in the edited image does not follow the editing instruction at all. 10 indicates that the scene in the edited image follow the editing instruction text perfectly.)
+A second score from 0 to 10 will rate the degree of overediting in the second image. (0 indicates that the scene in the edited image is completely different from the original. 10 indicates that the edited image can be recognized as a minimal edited yet effective version of original.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the editing success and 'score2' evaluates the degree of overediting.
+
+Editing instruction: <instruction>
+"""
+
+_prompts_0shot_t2i_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success in following the prompt.
+(0 indicates that the AI generated image does not follow the prompt at all. 10 indicates the AI generated image follows the prompt perfectly.)
+
+Put the score in a list such that output score = [score].
+
+Text Prompt: <prompt>
+"""
+
+_prompts_0shot_cig_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success in following the prompt.
+(0 indicates that the second image does not follow the prompt at all. 10 indicates the second image follows the prompt perfectly.)
+A second score from 0 to 10 will rate how well the generated image is following the guidance image.
+(0 indicates that the second image is not following the guidance at all. 10 indicates that second image is following the guidance image.)
+Put the score in a list such that output score = [score1, score2], where 'score1' evaluates the prompt and 'score2' evaluates the guidance.
+
+Text Prompt: <prompt>"""
+
+_prompts_0shot_control_image_gen_rule = """RULES:
+
+Two images will be provided: The first being a processed image (e.g. Canny edges, openpose, grayscale etc.) and the second being an AI-generated image using the first image as guidance.
+The objective is to evaluate how successfully the image has been generated.
+"""
+
+_prompts_0shot_rule_PQ = """RULES:
+
+The image is an AI-generated image.
+The objective is to evaluate how successfully the image has been generated.
+
+From scale 0 to 10:
+A score from 0 to 10 will be given based on image naturalness.
+(
+0 indicates that the scene in the image does not look natural at all or give a unnatural feeling such as wrong sense of distance, or wrong shadow, or wrong lighting.
+10 indicates that the image looks natural.
+)
+A second score from 0 to 10 will rate the image artifacts.
+(
+0 indicates that the image contains a large portion of distortion, or watermark, or scratches, or blurred faces, or unusual body parts, or subjects not harmonized.
+10 indicates the image has no artifacts.
+)
+Put the score in a list such that output score = [naturalness, artifacts]
+"""
+
+_prompts_0shot_t2v_rule_SC = """From scale 0 to 10:
+A score from 0 to 10 will be given based on the success in following the prompt.
+(0 indicates that the image frames does not follow the prompt at all. 10 indicates the image frames follows the prompt perfectly.)
+
+Put the score in a list such that output score = [score].
+
+Text Prompt: <prompt>
+"""
+
+_prompts_0shot_multi_subject_image_gen_rule = """RULES:
+
+Two images will be provided:
+This first image is a concatenation of two sub-images, each sub-image contain one token subject.
+The second image being an AI-generated image using the first image as guidance.
+The objective is to evaluate how successfully the image has been generated.
+"""
+
+_prompts_0shot_subject_image_gen_rule = """RULES:
+
+Two images will be provided: The first being a token subject image and the second being an AI-generated image using the first image as guidance.
+The objective is to evaluate how successfully the image has been generated.
+"""
+
+_prompts_0shot_one_image_gen_rule = """RULES:
+
+The image is an AI-generated image according to the text prompt.
+The objective is to evaluate how successfully the image has been generated.
+"""
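The templates above drive LLM-as-judge scoring for the new GEdit image-edit benchmark: each `_prompts_*` string carries `<instruction>`, `<prompt>`, or `<subject>` placeholders, and the judge is told to return a JSON block wrapped in the `||V^=^V||` delimiter. The sketch below shows one plausible way to fill a template and parse the reply; the helper names are hypothetical and the actual wiring in gedit_adapter.py / utils.py may differ.

import json
import re

from evalscope.benchmarks.image_edit.gedit import vie_prompts

DELIMITER = '||V^=^V||'


def build_edit_judge_prompt(instruction: str) -> str:
    # Hypothetical helper: combine the shared judge context with the 0-shot
    # two-image-edit rule and fill in the <instruction> placeholder.
    return (
        vie_prompts._context + '\n'
        + vie_prompts._prompts_0shot_two_image_edit_rule
        + vie_prompts._prompts_0shot_tie_rule_SC.replace('<instruction>', instruction)
    )


def parse_judge_reply(reply: str) -> dict:
    # The judge is instructed to wrap its JSON answer in ||V^=^V|| delimiters;
    # fall back to the raw reply if the delimiters are missing.
    match = re.search(re.escape(DELIMITER) + r'(.*?)' + re.escape(DELIMITER), reply, re.DOTALL)
    payload = match.group(1) if match else reply
    return json.loads(payload)  # e.g. {"score": [5, 10], "reasoning": "..."}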
--- evalscope/benchmarks/iquiz/iquiz_adapter.py
+++ evalscope/benchmarks/iquiz/iquiz_adapter.py
@@ -1,70 +1,35 @@
-from evalscope.
-from evalscope.
-from evalscope.
-from evalscope.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.multi_choices import MultipleChoiceTemplate
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='iquiz',
+        pretty_name='IQuiz',
+        tags=[Tags.KNOWLEDGE, Tags.MULTIPLE_CHOICE, Tags.CHINESE],
+        description=
+        'IQuiz is a benchmark for evaluating AI models on IQ and EQ questions. It consists of multiple-choice questions where the model must select the correct answer and provide an explanation.', # noqa: E501
+        dataset_id='AI-ModelScope/IQuiz',
+        metric_list=['acc'],
+        subset_list=['IQ', 'EQ'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='test',
+        prompt_template=MultipleChoiceTemplate.CHINESE_SINGLE_ANSWER_TEMPLATE_COT,
+    )
 )
-class IQuizAdapter(
+class IQuizAdapter(MultiChoiceAdapter):
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
-
-
-
-
-
-
-
-            "question":"天气预报说本周星期三会下雨,昨天果然下雨了,今天星期几?",
-            "choices":["星期一","星期二","星期三","星期四"],
-            "answer":"D",
-            "level":1
-        }
-        """
-        prompt = f"问题: {input_d['question']}\n"
-        prompt += self.__form_options(input_d['choices'])
-        return self.gen_prompt_data(prompt)
-
-    def __form_options(self, options: list):
-        option_str = '选项:\n'
-        for opt, choice in zip(options, self.choices):
-            option_str += f'({choice}): {opt}' + '\n'
-        return option_str
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        """
-        Parse the raw input labels (gold).
-        """
-        return input_d['answer']
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = EvalType.CHECKPOINT) -> str:
-        """
-        Parse the predicted result and extract proper answer.
-        """
-        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
-            return result
-        else:
-            return ResponseParser.parse_first_option_with_choices(result, self.choices)
-
-    def match(self, gold: str, pred: str) -> float:
-        """
-        Match the gold answer and the predicted answer.
-        """
-        return exact_match(gold=gold, pred=pred)
+    def record_to_sample(self, record) -> Sample:
+        return Sample(
+            input=record['question'],
+            choices=record['choices'],
+            target=record['answer'],
+            metadata={'level': record.get('level', 'unknown')},
+        )
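The IQuiz rewrite is representative of the 1.0 adapter migration: the old gen_prompt / parse_pred_result / match methods give way to declarative BenchmarkMeta fields plus a record_to_sample hook, and the benchmark is resolved by name through register_benchmark. A rough usage sketch under that assumption follows; TaskConfig and run_task are evalscope's documented entry points, but the exact 1.0.1 argument set and the model id shown here are illustrative, not taken from this diff.

from evalscope import TaskConfig, run_task

# Hedged sketch: smoke-test the re-registered 'iquiz' benchmark end to end.
task = TaskConfig(
    model='Qwen/Qwen2.5-0.5B-Instruct',  # placeholder model identifier
    datasets=['iquiz'],                  # resolved via register_benchmark(name='iquiz')
    limit=5,                             # evaluate only a few samples
)
run_task(task_cfg=task)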
--- evalscope/benchmarks/live_code_bench/evaluate_utils.py
+++ evalscope/benchmarks/live_code_bench/evaluate_utils.py
@@ -130,8 +130,8 @@ def evaluate_generations(
         results[index] = result
         metadata[index] = meta
 
-    assert len(results
-
+    assert len(results
+    ) == len(generations_list), f'results = {len(results)} inputs = {len(generations_list)} {results=}'
 
     return results, metadata
 