evalscope 0.14.0__py3-none-any.whl → 0.15.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of evalscope has been flagged as possibly problematic; see the registry's advisory for details.
- evalscope/arguments.py +2 -1
- evalscope/benchmarks/__init__.py +2 -2
- evalscope/benchmarks/aigc/__init__.py +0 -0
- evalscope/benchmarks/aigc/t2i/__init__.py +0 -0
- evalscope/benchmarks/aigc/t2i/base.py +56 -0
- evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +77 -0
- evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +58 -0
- evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +58 -0
- evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +57 -0
- evalscope/benchmarks/aigc/t2i/tifa_adapter.py +37 -0
- evalscope/benchmarks/aime/aime24_adapter.py +1 -1
- evalscope/benchmarks/aime/aime25_adapter.py +4 -4
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +1 -2
- evalscope/benchmarks/arc/arc_adapter.py +1 -1
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -3
- evalscope/benchmarks/ceval/ceval_adapter.py +2 -2
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +1 -3
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +1 -1
- evalscope/benchmarks/competition_math/competition_math_adapter.py +1 -2
- evalscope/benchmarks/data_adapter.py +16 -9
- evalscope/benchmarks/data_collection/data_collection_adapter.py +6 -4
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +2 -2
- evalscope/benchmarks/general_qa/general_qa_adapter.py +3 -3
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +16 -21
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +4 -1
- evalscope/benchmarks/live_code_bench/testing_util.py +6 -3
- evalscope/benchmarks/math_500/math_500_adapter.py +1 -1
- evalscope/benchmarks/mmlu/mmlu_adapter.py +3 -1
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +1 -2
- evalscope/benchmarks/utils.py +7 -16
- evalscope/cli/start_app.py +1 -1
- evalscope/collections/evaluator.py +16 -4
- evalscope/config.py +7 -3
- evalscope/constants.py +11 -0
- evalscope/evaluator/evaluator.py +9 -3
- evalscope/evaluator/reviewer/auto_reviewer.py +1 -1
- evalscope/metrics/__init__.py +49 -4
- evalscope/metrics/llm_judge.py +1 -1
- evalscope/metrics/named_metrics.py +13 -0
- evalscope/metrics/t2v_metrics/__init__.py +66 -0
- evalscope/metrics/t2v_metrics/clipscore.py +14 -0
- evalscope/metrics/t2v_metrics/constants.py +12 -0
- evalscope/metrics/t2v_metrics/itmscore.py +14 -0
- evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +30 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +6 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +132 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +286 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +114 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +86 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +85 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +62 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +84 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +97 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +171 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +80 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +73 -0
- evalscope/metrics/t2v_metrics/models/model.py +45 -0
- evalscope/metrics/t2v_metrics/models/utils.py +25 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +1 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +300 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +12 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +82 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +50 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +218 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +150 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +465 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +141 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +188 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +106 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +307 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +416 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +8 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +191 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +318 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +10 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +37 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +208 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +231 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +1093 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +211 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +109 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +452 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +364 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +755 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +273 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +880 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +1844 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +81 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +56 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +212 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +164 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +202 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +185 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +178 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +112 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +371 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +344 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +858 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +271 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +503 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +1270 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +473 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +31 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +27 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +233 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +392 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +127 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +17 -0
- evalscope/metrics/t2v_metrics/score.py +78 -0
- evalscope/metrics/t2v_metrics/vqascore.py +14 -0
- evalscope/models/__init__.py +50 -14
- evalscope/models/adapters/__init__.py +17 -0
- evalscope/models/{base_adapter.py → adapters/base_adapter.py} +17 -17
- evalscope/models/{chat_adapter.py → adapters/chat_adapter.py} +10 -7
- evalscope/models/{choice_adapter.py → adapters/choice_adapter.py} +2 -6
- evalscope/models/{custom_adapter.py → adapters/custom_adapter.py} +2 -4
- evalscope/models/{server_adapter.py → adapters/server_adapter.py} +1 -3
- evalscope/models/adapters/t2i_adapter.py +76 -0
- evalscope/models/custom/__init__.py +2 -1
- evalscope/models/custom/dummy_model.py +11 -13
- evalscope/models/local_model.py +82 -33
- evalscope/models/model.py +2 -42
- evalscope/models/register.py +26 -0
- evalscope/perf/benchmark.py +4 -3
- evalscope/perf/main.py +4 -2
- evalscope/perf/plugin/datasets/flickr8k.py +2 -1
- evalscope/perf/utils/benchmark_util.py +2 -2
- evalscope/perf/utils/db_util.py +16 -8
- evalscope/report/__init__.py +1 -0
- evalscope/report/app.py +117 -67
- evalscope/report/app_arguments.py +11 -0
- evalscope/report/generator.py +1 -1
- evalscope/run.py +3 -3
- evalscope/third_party/thinkbench/eval.py +19 -7
- evalscope/utils/chat_service.py +2 -2
- evalscope/utils/import_utils.py +66 -0
- evalscope/utils/utils.py +12 -4
- evalscope/version.py +2 -2
- {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/METADATA +20 -3
- {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/RECORD +178 -66
- tests/aigc/__init__.py +1 -0
- tests/aigc/test_t2i.py +87 -0
- tests/cli/test_run.py +20 -7
- tests/perf/test_perf.py +6 -3
- evalscope/metrics/code_metric.py +0 -98
- evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +0 -58485
- evalscope/metrics/resources/gpt2-zhcn3-v4.json +0 -1
- {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/LICENSE +0 -0
- {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/WHEEL +0 -0
- {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/entry_points.txt +0 -0
- {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/top_level.txt +0 -0
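The headline addition is a vendored t2v_metrics package (clipscore.py, itmscore.py, vqascore.py) backing the new text-to-image benchmarks under evalscope/benchmarks/aigc/t2i/. For orientation, a CLIPScore-style metric scores image-text alignment via dual-encoder similarity; below is a minimal sketch using the public Hugging Face CLIP checkpoint. This is illustrative only — evalscope's clipscore.py wraps its own model builders and checkpoints, and the image path and prompt here are made up.

import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Public CLIP checkpoint; evalscope's own metric may use different backbones (HPSv2, PickScore, MPS, ...).
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.open("generated.png")      # hypothetical text-to-image output
prompt = "a photo of a red bicycle"      # the prompt the image was generated from

inputs = processor(text=[prompt], images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    out = model(**inputs)

# Cosine similarity between image and text embeddings, scaled by CLIP's learned logit scale.
print(out.logits_per_image.item())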
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml
ADDED
@@ -0,0 +1,36 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: pretrain
+  load_finetuned: False
+
+  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained.pth"
+  finetuned: ""
+
+  # vit encoder
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
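The LAVIS-style configs added below all share this shape: a model section (architecture, checkpoint locations, ViT and Q-Former settings) plus a preprocess section naming the vision and text processors. A minimal sketch of reading one as plain YAML — PyYAML is shown purely for illustration; evalscope's vendored loader actually lives in lavis/common/config.py, and the local path here is hypothetical:

import yaml  # PyYAML

with open("blip2_pretrain.yaml") as f:   # hypothetical local path
    cfg = yaml.safe_load(f)

print(cfg["model"]["arch"])                                  # "pretrain"
print(cfg["model"]["num_query_token"])                       # 32
print(cfg["preprocess"]["vis_processor"]["train"]["name"])   # "blip_image_train"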
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml
ADDED
@@ -0,0 +1,42 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: pretrain_flant5xl
+  load_finetuned: False
+
+  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xl.pth"
+  finetuned: ""
+
+  # vit encoder
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+  # T5
+  t5_model: "google/flan-t5-xl"
+
+  # generation configs
+  prompt: ""
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml
ADDED
@@ -0,0 +1,42 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: pretrain_flant5xl
+  load_finetuned: False
+
+  pretrained: "lavis/output/BLIP2/Pretrain_stage2_flant5_xl_batch_80_no_prefix_iter_100000/20231015004/checkpoint_80000.pth"
+  finetuned: ""
+
+  # vit encoder
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+  # T5
+  t5_model: "google/flan-t5-xl"
+
+  # generation configs
+  prompt: ""
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml
ADDED
@@ -0,0 +1,42 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: pretrain_flant5xl
+  load_finetuned: False
+
+  pretrained: "lavis/output/BLIP2/Pretrain_stage2_flant5_xl_batch_80_prefix_iter_100000/20231015004/checkpoint_80000.pth"
+  finetuned: ""
+
+  # vit encoder
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+  # T5
+  t5_model: "google/flan-t5-xl"
+
+  # generation configs
+  prompt: ""
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml
ADDED
@@ -0,0 +1,43 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: pretrain_flant5xl
+  load_finetuned: False
+
+  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xl_vitL.pth"
+  finetuned: ""
+
+  # vit encoder
+  vit_model: "clip_L"
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+  # T5
+  t5_model: "google/flan-t5-xl"
+
+  # generation configs
+  prompt: ""
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml
ADDED
@@ -0,0 +1,42 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: pretrain_flant5xxl
+  load_finetuned: False
+
+  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth"
+  finetuned: ""
+
+  # vit encoder
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+  # T5
+  t5_model: "google/flan-t5-xxl"
+
+  # generation configs
+  prompt: ""
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml
ADDED
@@ -0,0 +1,42 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: pretrain_opt2.7b
+  load_finetuned: False
+
+  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_opt2.7b.pth"
+  finetuned: ""
+
+  # vit encoder
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+  # OPT
+  opt_model: "facebook/opt-2.7b"
+
+  # generation configs
+  prompt: ""
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml
ADDED
@@ -0,0 +1,42 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: pretrain_opt6.7b
+  load_finetuned: False
+
+  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_opt6.7b.pth"
+  finetuned: ""
+
+  # vit encoder
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+  # OPT
+  opt_model: "facebook/opt-6.7b"
+
+  # generation configs
+  prompt: ""
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml
ADDED
@@ -0,0 +1,37 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: pretrain
+  load_finetuned: False
+
+  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_vitL.pth"
+  finetuned: ""
+
+  # vit encoder
+  vit_model: "clip_L"
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml
ADDED
@@ -0,0 +1,43 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: blip2_vicuna13b
+  load_finetuned: False
+  load_pretrained: True
+
+  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_vicuna13b.pth"
+  finetuned: ""
+
+  # vit encoder
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+  # path to Vicuna checkpoint
+  llm_model: "lmsys/vicuna-13b-v1.1"
+
+  # generation configs
+  prompt: ""
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip2_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml
ADDED
@@ -0,0 +1,43 @@
+# Copyright (c) 2022, salesforce.com, inc.
+# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+model:
+  arch: blip2_vicuna7b
+  load_finetuned: False
+  load_pretrained: True
+
+  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_vicuna7b.pth"
+  finetuned: ""
+
+  # vit encoder
+  image_size: 224
+  drop_path_rate: 0
+  use_grad_checkpoint: False
+  vit_precision: "fp16"
+  freeze_vit: True
+
+  # Q-Former
+  num_query_token: 32
+
+  # path to Vicuna checkpoint
+  llm_model: "lmsys/vicuna-7b-v1.1"
+
+  # generation configs
+  prompt: ""
+
+
+preprocess:
+  vis_processor:
+    train:
+      name: "blip2_image_train"
+      image_size: 224
+    eval:
+      name: "blip_image_eval"
+      image_size: 224
+  text_processor:
+    train:
+      name: "blip_caption"
+    eval:
+      name: "blip_caption"
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "add_type_embeddings": false,
+  "vocab_size": 30524,
+  "encoder_width": 768,
+  "add_cross_attention": true
+}
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "add_type_embeddings": false,
+  "vocab_size": 30522,
+  "encoder_width": 768,
+  "add_cross_attention": true,
+  "fusion_layer": 6
+}
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "add_type_embeddings": false,
+  "vocab_size": 30524,
+  "encoder_width": 1024,
+  "add_cross_attention": true
+}
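These med_*.json files follow the Hugging Face BERT config schema, extended with BLIP-specific keys (encoder_width, add_type_embeddings, and fusion_layer in the ALBEF variant) that the vendored med.py consumes. A sketch of inspecting one with transformers — the path is illustrative, and note that unknown keys are simply preserved as attributes on the config object:

from transformers import BertConfig

cfg = BertConfig.from_json_file("med_config_albef.json")  # illustrative local path
print(cfg.hidden_size)     # 768, standard BERT field
print(cfg.encoder_width)   # 768, custom key kept as an attribute
print(cfg.fusion_layer)    # 6, ALBEF-style split between self- and cross-attention layers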