fusion-bench 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fusion_bench/compat/method/__init__.py +1 -0
- fusion_bench/compat/method/base_algorithm.py +7 -1
- fusion_bench/compat/modelpool/__init__.py +1 -1
- fusion_bench/compat/taskpool/__init__.py +1 -1
- fusion_bench/dataset/arc_agi/arc.py +5 -0
- fusion_bench/dataset/arc_agi/preprocess.py +1 -1
- fusion_bench/dataset/clip_dataset.py +3 -0
- fusion_bench/dataset/fer2013.py +12 -0
- fusion_bench/dataset/llama/__init__.py +1 -0
- fusion_bench/dataset/llama/alpaca.py +93 -3
- fusion_bench/dataset/llama/collate.py +62 -2
- fusion_bench/dataset/llama/metamathqa.py +50 -0
- fusion_bench/dataset/llama/preference_700k.py +70 -0
- fusion_bench/dataset/llama/stanford_shp.py +90 -0
- fusion_bench/dataset/llama/ultrachat.py +58 -0
- fusion_bench/dataset/llama/utils/__init__.py +0 -0
- fusion_bench/method/__init__.py +3 -1
- fusion_bench/method/adamerging/layer_wise_adamerging.py +11 -4
- fusion_bench/method/adamerging/min_norm_solvers.py +4 -4
- fusion_bench/method/classification/clip_finetune.py +10 -13
- fusion_bench/method/linear/expo.py +39 -0
- fusion_bench/method/lm_finetune/__init__.py +1 -0
- fusion_bench/method/lm_finetune/bradley_terry_rm.py +432 -0
- fusion_bench/method/lm_finetune/fullfinetune_sft.py +90 -160
- fusion_bench/method/lm_finetune/peftfinetune_sft.py +49 -139
- fusion_bench/method/pruning/llama_magnitude_prune.py +2 -2
- fusion_bench/method/pruning/llama_random_prune.py +2 -2
- fusion_bench/method/surgery/__init__.py +1 -0
- fusion_bench/method/surgery/clip_layer_wise_adamerging_surgery.py +157 -0
- fusion_bench/method/tall_mask/__init__.py +0 -0
- fusion_bench/method/tall_mask/utils.py +234 -0
- fusion_bench/method/task_singular_vector/TSVC.py +16 -0
- fusion_bench/method/task_singular_vector/TSVM.py +63 -0
- fusion_bench/method/task_singular_vector/__init__.py +9 -0
- fusion_bench/method/task_singular_vector/utils/TSVC_utils.py +50 -0
- fusion_bench/method/task_singular_vector/utils/TSVM_utils.py +642 -0
- fusion_bench/method/task_singular_vector/utils/__init__.py +7 -0
- fusion_bench/method/ties_merging/ties_merging_utils.py +7 -2
- fusion_bench/mixins/__init__.py +2 -0
- fusion_bench/mixins/clip_classification.py +64 -11
- fusion_bench/mixins/fabric_training.py +320 -0
- fusion_bench/mixins/lightning_fabric.py +12 -1
- fusion_bench/modelpool/__init__.py +2 -0
- fusion_bench/modelpool/base_pool.py +0 -1
- fusion_bench/modelpool/causal_lm/__init__.py +1 -1
- fusion_bench/modelpool/causal_lm/causal_lm.py +21 -22
- fusion_bench/modelpool/clip_vision/modelpool.py +92 -8
- fusion_bench/modelpool/seq_classification_lm/__init__.py +2 -0
- fusion_bench/modelpool/seq_classification_lm/reward_model.py +15 -0
- fusion_bench/modelpool/seq_classification_lm/seq_classification_lm.py +98 -0
- fusion_bench/models/chat_templates/__init__.py +1 -0
- fusion_bench/models/chat_templates/llama_3_Instruct.py +1 -0
- fusion_bench/models/chat_templates/load_tokenizer.py +43 -0
- fusion_bench/models/hf_clip.py +50 -9
- fusion_bench/models/surgery/__init__.py +1 -0
- fusion_bench/models/surgery/surgerymodelwrapper.py +158 -0
- fusion_bench/models/utils.py +8 -0
- fusion_bench/models/wrappers/layer_wise_fusion.py +14 -5
- fusion_bench/models/wrappers/task_wise_fusion.py +5 -5
- fusion_bench/optim/__init__.py +2 -0
- fusion_bench/optim/exception.py +47 -0
- fusion_bench/optim/lr_scheduler/__init__.py +1 -0
- fusion_bench/optim/lr_scheduler/linear_warmup.py +222 -0
- fusion_bench/optim/lr_scheduler/utils/__init__.py +1 -0
- fusion_bench/optim/lr_scheduler/utils/visualization.py +119 -0
- fusion_bench/optim/mezo.py +0 -2
- fusion_bench/programs/fabric_fusion_program.py +12 -5
- fusion_bench/taskpool/clip_vision/taskpool.py +43 -6
- fusion_bench/taskpool/llama/reward_model.py +157 -0
- fusion_bench/taskpool/nyuv2_taskpool.py +2 -0
- fusion_bench/tasks/clip_classification/__init__.py +13 -45
- fusion_bench/tasks/clip_classification/clip_dataset.py +1 -16
- fusion_bench/tasks/clip_classification/cub_200_2011.py +208 -0
- fusion_bench/tasks/clip_classification/emnist_letters.py +31 -0
- fusion_bench/tasks/clip_classification/emnist_mnist.py +5 -0
- fusion_bench/tasks/clip_classification/fashion_mnist.py +18 -0
- fusion_bench/tasks/clip_classification/fer2013.py +18 -0
- fusion_bench/tasks/clip_classification/food101.py +105 -0
- fusion_bench/tasks/clip_classification/kmnist.py +17 -0
- fusion_bench/tasks/clip_classification/mongo_leaf_disease.py +19 -0
- fusion_bench/tasks/clip_classification/pcam.py +5 -0
- fusion_bench/utils/hydra_utils.py +22 -0
- fusion_bench/utils/parameters.py +12 -3
- fusion_bench/utils/plot/__init__.py +0 -0
- fusion_bench/utils/plot/token.py +52 -0
- fusion_bench/utils/plot/token_notebook.py +127 -0
- fusion_bench/utils/type.py +14 -3
- {fusion_bench-0.2.6.dist-info → fusion_bench-0.2.8.dist-info}/METADATA +1 -1
- {fusion_bench-0.2.6.dist-info → fusion_bench-0.2.8.dist-info}/RECORD +263 -90
- fusion_bench_config/clip-vit-base-patch32_robustness_corrupted.yaml +1 -1
- fusion_bench_config/dataset/image_classification/README.md +6 -0
- fusion_bench_config/dataset/image_classification/test/TALL14.yaml +20 -0
- fusion_bench_config/dataset/image_classification/test/TALL20.yaml +28 -0
- fusion_bench_config/dataset/image_classification/test/cifar10.yaml +1 -1
- fusion_bench_config/dataset/image_classification/test/cifar100.yaml +1 -1
- fusion_bench_config/dataset/image_classification/test/cub-200-2011.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/emnist_letters.yaml +5 -0
- fusion_bench_config/dataset/image_classification/test/emnist_mnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/fashion_mnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/fer2013.yaml +3 -0
- fusion_bench_config/dataset/image_classification/test/food101.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/kmnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/mango-leaf-disease.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/oxford-iiit-pet.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/oxford_flowers102.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/pcam.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/rendered-sst2.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/stl10.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/TALL14.yaml +20 -0
- fusion_bench_config/dataset/image_classification/train/TALL20.yaml +28 -0
- fusion_bench_config/dataset/image_classification/train/cifar10.yaml +1 -1
- fusion_bench_config/dataset/image_classification/train/cifar100.yaml +1 -1
- fusion_bench_config/dataset/image_classification/train/cub-200-2011.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/emnist_letters.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/emnist_mnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/fashion_mnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/fer2013.yaml +3 -0
- fusion_bench_config/dataset/image_classification/train/food101.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/kmnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/mango-leaf-disease.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/oxford-iiit-pet.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/oxford_flowers102.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/pcam.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/rendered-sst2.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/stl10.yaml +4 -0
- fusion_bench_config/dataset/llm_sft/alpaca_cleaned.yaml +6 -0
- fusion_bench_config/dataset/llm_sft/ultrachat_200k.yaml +3 -0
- fusion_bench_config/fabric/llama_peft_fsdp.yaml +16 -0
- fusion_bench_config/fabric/loggers/wandb_logger.yaml +2 -0
- fusion_bench_config/fabric/strategy/deepspeed.yaml +10 -0
- fusion_bench_config/fabric/strategy/llama_peft_fsdp.yaml +9 -0
- fusion_bench_config/fabric_model_fusion.yaml +1 -1
- fusion_bench_config/llama_full_finetune.yaml +19 -0
- fusion_bench_config/method/lm_finetune/bradley_terry_rm.yaml +47 -0
- fusion_bench_config/method/lm_finetune/fullfinetune_sft.yaml +11 -4
- fusion_bench_config/method/lm_finetune/peftfinetune_sft.yaml +4 -2
- fusion_bench_config/method/surgery/adamerging_surgery.yaml +27 -0
- fusion_bench_config/method/task_singular_vector/TaskSingularVectorMerging.yaml +2 -0
- fusion_bench_config/model/clip-vit/README.md +38 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_TALL14.yaml +22 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_TALL20.yaml +29 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_cifar10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_cifar100.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_dtd.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_emnist_letters.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_eurosat.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_fashion_mnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_fer2013.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_food101.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_gtsrb.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_kmnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_mnist.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_oxford-iiit-pet.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_oxford_flowers102.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_pcam.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_rendered-sst2.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_resisc45.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_stanford-cars.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_stl10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_sun397.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_svhn.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_TALL14.yaml +22 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_TALL20.yaml +29 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_cifar10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_cifar100.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_dtd.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_eight_tasks.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_emnist_letters.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_eurosat.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_fashion_mnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_fer2013.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_food101.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_gtsrb.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_kmnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_mnist.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_oxford-iiit-pet.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_oxford_flowers102.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_pcam.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_rendered-sst2.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_resisc45.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_stanford-cars.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_stl10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_sun397.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_svhn.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_TALL14.yaml +22 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_TALL20.yaml +29 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_cifar10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_cifar100.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_dtd.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_emnist_letters.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_eurosat.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_fashion_mnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_fer2013.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_food101.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_gtsrb.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_kmnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_mnist.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_oxford-iiit-pet.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_oxford_flowers102.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_pcam.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_rendered-sst2.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_resisc45.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_stanford-cars.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_stl10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_sun397.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_svhn.yaml +1 -3
- fusion_bench_config/model/clip-vit/download_TALL20_models.sh +6 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TA8_model_only.yaml +6 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14.yaml +11 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14_model_only.yaml +9 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20.yaml +11 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20_model_only.yaml +9 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_individual.yaml +15 -3
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL14.yaml +8 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL14_model_only.yaml +6 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL20.yaml +8 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL20_model_only.yaml +6 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_individual.yaml +9 -3
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_single_task_projection.yaml +15 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL14.yaml +11 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL14_model_only.yaml +9 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20.yaml +11 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20_model_only.yaml +9 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_individual.yaml +15 -3
- fusion_bench_config/modelpool/CausalLMPool/llama_alpaca_cleaned.yaml +21 -0
- fusion_bench_config/modelpool/CausalLMPool/llama_codealpaca.yaml +21 -0
- fusion_bench_config/modelpool/CausalLMPool/llama_metamathqa.yaml +19 -0
- fusion_bench_config/modelpool/CausalLMPool/llama_ultrachat.yaml +18 -0
- fusion_bench_config/modelpool/SeqenceClassificationModelPool/llama_preference700k.yaml +23 -0
- fusion_bench_config/modelpool/SeqenceClassificationModelPool/single_reward_model.yaml +14 -0
- fusion_bench_config/nyuv2_config.yaml +5 -1
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-base-patch32_robustness_corrupted.yaml +27 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TALL14.yaml +19 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TALL20.yaml +26 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_cifar10.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_cifar100.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_dtd.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_emnist_letters.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_eurosat.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_fashion_mnist.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_fer2013.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_food101.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_gtsrb.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_kmnist.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_mnist.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_oxford-iiit-pet.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_oxford_flowers102.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_oxford_flowers102_val.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_pcam.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_rendered-sst2.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_resisc45.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_stanford-cars.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_stl10.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_sun397.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_svhn.yaml +3 -0
- fusion_bench_config/taskpool/reward_model_evaluation.yaml +18 -0
- fusion_bench_config/llama_weighted_average.yaml +0 -26
- {fusion_bench-0.2.6.dist-info → fusion_bench-0.2.8.dist-info}/LICENSE +0 -0
- {fusion_bench-0.2.6.dist-info → fusion_bench-0.2.8.dist-info}/WHEEL +0 -0
- {fusion_bench-0.2.6.dist-info → fusion_bench-0.2.8.dist-info}/entry_points.txt +0 -0
- {fusion_bench-0.2.6.dist-info → fusion_bench-0.2.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
fashion_mnist: tanganke/clip-vit-base-patch16_fashion_mnist
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
fer2013: tanganke/clip-vit-base-patch16_fer2013
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
food101: tanganke/clip-vit-base-patch16_food101
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
kmnist: tanganke/clip-vit-base-patch16_kmnist
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
oxford-iiit-pet: tanganke/clip-vit-base-patch16_oxford-iiit-pet
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
oxford_flowers102: tanganke/clip-vit-base-patch16_oxford_flowers102
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
pcam: tanganke/clip-vit-base-patch16_pcam
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
rendered-sst2: tanganke/clip-vit-base-patch16_rendered-sst2
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
stl10: tanganke/clip-vit-base-patch16_stl10
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
# The 14 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
# pre-trained model
|
|
6
|
+
- clip-vit-base-patch32
|
|
7
|
+
# eight tasks in the task arithmetic paper
|
|
8
|
+
- clip-vit-base-patch32_sun397
|
|
9
|
+
- clip-vit-base-patch32_stanford-cars
|
|
10
|
+
- clip-vit-base-patch32_resisc45
|
|
11
|
+
- clip-vit-base-patch32_eurosat
|
|
12
|
+
- clip-vit-base-patch32_svhn
|
|
13
|
+
- clip-vit-base-patch32_gtsrb
|
|
14
|
+
- clip-vit-base-patch32_mnist
|
|
15
|
+
- clip-vit-base-patch32_dtd
|
|
16
|
+
# additional 6 tasks in the TALL mask paper
|
|
17
|
+
- clip-vit-base-patch32_oxford_flowers102
|
|
18
|
+
- clip-vit-base-patch32_pcam
|
|
19
|
+
- clip-vit-base-patch32_fer2013
|
|
20
|
+
- clip-vit-base-patch32_oxford-iiit-pet
|
|
21
|
+
- clip-vit-base-patch32_stl10
|
|
22
|
+
- clip-vit-base-patch32_cifar100
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# The 20 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
# pre-trained model
|
|
6
|
+
- clip-vit-base-patch32
|
|
7
|
+
# eight tasks in the task arithmetic paper
|
|
8
|
+
- clip-vit-base-patch32_sun397
|
|
9
|
+
- clip-vit-base-patch32_stanford-cars
|
|
10
|
+
- clip-vit-base-patch32_resisc45
|
|
11
|
+
- clip-vit-base-patch32_eurosat
|
|
12
|
+
- clip-vit-base-patch32_svhn
|
|
13
|
+
- clip-vit-base-patch32_gtsrb
|
|
14
|
+
- clip-vit-base-patch32_mnist
|
|
15
|
+
- clip-vit-base-patch32_dtd
|
|
16
|
+
# additional 6 tasks in the TALL mask paper (TALL 14)
|
|
17
|
+
- clip-vit-base-patch32_oxford_flowers102
|
|
18
|
+
- clip-vit-base-patch32_pcam
|
|
19
|
+
- clip-vit-base-patch32_fer2013
|
|
20
|
+
- clip-vit-base-patch32_oxford-iiit-pet
|
|
21
|
+
- clip-vit-base-patch32_stl10
|
|
22
|
+
- clip-vit-base-patch32_cifar100
|
|
23
|
+
# additional 6 tasks in the TALL mask paper (TALL 20)
|
|
24
|
+
- clip-vit-base-patch32_cifar10
|
|
25
|
+
- clip-vit-base-patch32_food101
|
|
26
|
+
- clip-vit-base-patch32_fashion_mnist
|
|
27
|
+
- clip-vit-base-patch32_emnist_letters
|
|
28
|
+
- clip-vit-base-patch32_kmnist
|
|
29
|
+
- clip-vit-base-patch32_rendered-sst2
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
cifar10: tanganke/clip-vit-base-patch32_cifar10
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
cifar100: tanganke/clip-vit-base-patch32_cifar100
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
emnist_letters: tanganke/clip-vit-base-patch32_emnist_letters
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
fashion_mnist: tanganke/clip-vit-base-patch32_fashion_mnist
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
fer2013: tanganke/clip-vit-base-patch32_fer2013
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
food101: tanganke/clip-vit-base-patch32_food101
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
kmnist: tanganke/clip-vit-base-patch32_kmnist
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
oxford-iiit-pet: tanganke/clip-vit-base-patch32_oxford-iiit-pet
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
oxford_flowers102: tanganke/clip-vit-base-patch32_oxford_flowers102
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
pcam: tanganke/clip-vit-base-patch32_pcam
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
rendered-sst2: tanganke/clip-vit-base-patch32_rendered-sst2
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
stl10: tanganke/clip-vit-base-patch32_stl10
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
# The 14 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
# pre-trained model
|
|
6
|
+
- clip-vit-large-patch14
|
|
7
|
+
# eight tasks in the task arithmetic paper
|
|
8
|
+
- clip-vit-large-patch14_sun397
|
|
9
|
+
- clip-vit-large-patch14_stanford-cars
|
|
10
|
+
- clip-vit-large-patch14_resisc45
|
|
11
|
+
- clip-vit-large-patch14_eurosat
|
|
12
|
+
- clip-vit-large-patch14_svhn
|
|
13
|
+
- clip-vit-large-patch14_gtsrb
|
|
14
|
+
- clip-vit-large-patch14_mnist
|
|
15
|
+
- clip-vit-large-patch14_dtd
|
|
16
|
+
# additional 6 tasks in the TALL mask paper
|
|
17
|
+
- clip-vit-large-patch14_oxford_flowers102
|
|
18
|
+
- clip-vit-large-patch14_pcam
|
|
19
|
+
- clip-vit-large-patch14_fer2013
|
|
20
|
+
- clip-vit-large-patch14_oxford-iiit-pet
|
|
21
|
+
- clip-vit-large-patch14_stl10
|
|
22
|
+
- clip-vit-large-patch14_cifar100
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# The 20 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
# pre-trained model
|
|
6
|
+
- clip-vit-large-patch14
|
|
7
|
+
# eight tasks in the task arithmetic paper
|
|
8
|
+
- clip-vit-large-patch14_sun397
|
|
9
|
+
- clip-vit-large-patch14_stanford-cars
|
|
10
|
+
- clip-vit-large-patch14_resisc45
|
|
11
|
+
- clip-vit-large-patch14_eurosat
|
|
12
|
+
- clip-vit-large-patch14_svhn
|
|
13
|
+
- clip-vit-large-patch14_gtsrb
|
|
14
|
+
- clip-vit-large-patch14_mnist
|
|
15
|
+
- clip-vit-large-patch14_dtd
|
|
16
|
+
# additional 6 tasks in the TALL mask paper (TALL 14)
|
|
17
|
+
- clip-vit-large-patch14_oxford_flowers102
|
|
18
|
+
- clip-vit-large-patch14_pcam
|
|
19
|
+
- clip-vit-large-patch14_fer2013
|
|
20
|
+
- clip-vit-large-patch14_oxford-iiit-pet
|
|
21
|
+
- clip-vit-large-patch14_stl10
|
|
22
|
+
- clip-vit-large-patch14_cifar100
|
|
23
|
+
# additional 6 tasks in the TALL mask paper (TALL 20)
|
|
24
|
+
- clip-vit-large-patch14_cifar10
|
|
25
|
+
- clip-vit-large-patch14_food101
|
|
26
|
+
- clip-vit-large-patch14_fashion_mnist
|
|
27
|
+
- clip-vit-large-patch14_emnist_letters
|
|
28
|
+
- clip-vit-large-patch14_kmnist
|
|
29
|
+
- clip-vit-large-patch14_rendered-sst2
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
cifar10: tanganke/clip-vit-large-patch14_cifar10
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
cifar100: tanganke/clip-vit-large-patch14_cifar100
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
emnist_letters: tanganke/clip-vit-large-patch14_emnist_letters
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
fashion_mnist: tanganke/clip-vit-large-patch14_fashion_mnist
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
fer2013: tanganke/clip-vit-large-patch14_fer2013
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
food101: tanganke/clip-vit-large-patch14_food101
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
kmnist: tanganke/clip-vit-large-patch14_kmnist
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
oxford-iiit-pet: tanganke/clip-vit-large-patch14_oxford-iiit-pet
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
oxford_flowers102: tanganke/clip-vit-large-patch14_oxford_flowers102
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
pcam: tanganke/clip-vit-large-patch14_pcam
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
rendered-sst2: tanganke/clip-vit-large-patch14_rendered-sst2
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
stl10: tanganke/clip-vit-large-patch14_stl10
|
|
@@ -0,0 +1,6 @@
|
|
|
1
|
+
#! /bin/bash
|
|
2
|
+
for MODEL in clip-vit-base-patch32 clip-vit-base-patch16 clip-vit-large-patch14; do
|
|
3
|
+
for TASK in sun397 stanford-cars resisc45 eurosat svhn gtsrb mnist dtd oxford_flowers102 pcam fer2013 oxford-iiit-pet stl10 cifar100 cifar10 food101 fashion_mnist emnist_letters kmnist rendered-sst2; do
|
|
4
|
+
huggingface-cli download --local-dir tanganke/${MODEL}_${TASK} tanganke/${MODEL}_${TASK}
|
|
5
|
+
done
|
|
6
|
+
done
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# The 14 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-base-patch16_TALL14
|
|
7
|
+
- /dataset/image_classification/train@train_datasets: TALL14
|
|
8
|
+
- /dataset/image_classification/test@test_datasets: TALL14
|
|
9
|
+
processor:
|
|
10
|
+
_target_: transformers.CLIPProcessor.from_pretrained
|
|
11
|
+
pretrained_model_name_or_path: openai/clip-vit-base-patch16
|
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14_model_only.yaml
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
# The 14 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-base-patch16_TALL14
|
|
7
|
+
processor:
|
|
8
|
+
_target_: transformers.CLIPProcessor.from_pretrained
|
|
9
|
+
pretrained_model_name_or_path: openai/clip-vit-base-patch16
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# The 20 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-base-patch16_TALL20
|
|
7
|
+
- /dataset/image_classification/train@train_datasets: TALL20
|
|
8
|
+
- /dataset/image_classification/test@test_datasets: TALL20
|
|
9
|
+
processor:
|
|
10
|
+
_target_: transformers.CLIPProcessor.from_pretrained
|
|
11
|
+
pretrained_model_name_or_path: openai/clip-vit-base-patch16
|
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20_model_only.yaml
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
# The 20 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-base-patch16_TALL20
|
|
7
|
+
processor:
|
|
8
|
+
_target_: transformers.CLIPProcessor.from_pretrained
|
|
9
|
+
pretrained_model_name_or_path: openai/clip-vit-base-patch16
|
|
@@ -1,7 +1,19 @@
|
|
|
1
|
+
# This is useful for evaluating the performance of a single clip vision model
|
|
2
|
+
#
|
|
3
|
+
# fusion_bench \
|
|
4
|
+
# modelpool=CLIPVisionModelPool/clip-vit-base-patch16_individual \
|
|
5
|
+
# modelpool.base_model=${MODEL_PATH}
|
|
6
|
+
# ...
|
|
1
7
|
defaults:
|
|
2
8
|
- CLIPVisionModelPool@: _template
|
|
3
|
-
|
|
4
|
-
|
|
9
|
+
|
|
10
|
+
models:
|
|
11
|
+
_pretrained_:
|
|
12
|
+
_target_: transformers.CLIPVisionModel.from_pretrained
|
|
13
|
+
pretrained_model_name_or_path: ${...base_model}
|
|
14
|
+
|
|
5
15
|
processor:
|
|
6
16
|
_target_: transformers.CLIPProcessor.from_pretrained
|
|
7
|
-
pretrained_model_name_or_path:
|
|
17
|
+
pretrained_model_name_or_path: ${..base_model}
|
|
18
|
+
|
|
19
|
+
base_model: openai/clip-vit-base-patch16
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
# The 14 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-base-patch32_TALL14
|
|
7
|
+
- /dataset/image_classification/train@train_datasets: TALL14
|
|
8
|
+
- /dataset/image_classification/test@test_datasets: TALL14
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
# The 20 tasks used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-base-patch32_TALL20
|
|
7
|
+
- /dataset/image_classification/train@train_datasets: TALL20
|
|
8
|
+
- /dataset/image_classification/test@test_datasets: TALL20
|
|
@@ -1,7 +1,13 @@
|
|
|
1
1
|
defaults:
|
|
2
2
|
- CLIPVisionModelPool@: _template
|
|
3
|
-
|
|
4
|
-
|
|
3
|
+
|
|
4
|
+
models:
|
|
5
|
+
_pretrained_:
|
|
6
|
+
_target_: transformers.CLIPVisionModel.from_pretrained
|
|
7
|
+
pretrained_model_name_or_path: ${...base_model}
|
|
8
|
+
|
|
5
9
|
processor:
|
|
6
10
|
_target_: transformers.CLIPProcessor.from_pretrained
|
|
7
|
-
pretrained_model_name_or_path:
|
|
11
|
+
pretrained_model_name_or_path: ${..base_model}
|
|
12
|
+
|
|
13
|
+
base_model: openai/clip-vit-base-patch32
|
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_single_task_projection.yaml
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
defaults:
|
|
2
|
+
- /model/clip-vit@models:
|
|
3
|
+
- clip-vit-base-patch32
|
|
4
|
+
- clip-vit-base-patch32_sun397
|
|
5
|
+
- clip-vit-base-patch32_stanford-cars
|
|
6
|
+
|
|
7
|
+
_target_: fusion_bench.modelpool.CLIPVisionModelPool
|
|
8
|
+
_recursive_: false
|
|
9
|
+
|
|
10
|
+
train_datasets: null
|
|
11
|
+
test_datasets: null
|
|
12
|
+
|
|
13
|
+
processor:
|
|
14
|
+
_target_: transformers.CLIPProcessor.from_pretrained
|
|
15
|
+
pretrained_model_name_or_path: openai/clip-vit-base-patch32
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# The 14 task used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-large-patch14_TALL14
|
|
7
|
+
- /dataset/image_classification/train@train_datasets: TALL14
|
|
8
|
+
- /dataset/image_classification/test@test_datasets: TALL14
|
|
9
|
+
processor:
|
|
10
|
+
_target_: transformers.CLIPProcessor.from_pretrained
|
|
11
|
+
pretrained_model_name_or_path: openai/clip-vit-large-patch14
|
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL14_model_only.yaml
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
# The 14 task used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-large-patch14_TALL14
|
|
7
|
+
processor:
|
|
8
|
+
_target_: transformers.CLIPProcessor.from_pretrained
|
|
9
|
+
pretrained_model_name_or_path: openai/clip-vit-large-patch14
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# The 20 task used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-large-patch14_TALL20
|
|
7
|
+
- /dataset/image_classification/train@train_datasets: TALL20
|
|
8
|
+
- /dataset/image_classification/test@test_datasets: TALL20
|
|
9
|
+
processor:
|
|
10
|
+
_target_: transformers.CLIPProcessor.from_pretrained
|
|
11
|
+
pretrained_model_name_or_path: openai/clip-vit-large-patch14
|
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20_model_only.yaml
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
# The 20 task used in the paper:
|
|
2
|
+
# Wang et al. Localizing Task Information for Improved Model Merging and Compression
|
|
3
|
+
# http://arxiv.org/abs/2405.07813
|
|
4
|
+
defaults:
|
|
5
|
+
- CLIPVisionModelPool@: _template
|
|
6
|
+
- /model/clip-vit@models: clip-vit-large-patch14_TALL20
|
|
7
|
+
processor:
|
|
8
|
+
_target_: transformers.CLIPProcessor.from_pretrained
|
|
9
|
+
pretrained_model_name_or_path: openai/clip-vit-large-patch14
|
|
@@ -1,7 +1,19 @@
|
|
|
1
|
+
# This is useful for evluate the performance of a single clip vision model
|
|
2
|
+
#
|
|
3
|
+
# fusion_bench \
|
|
4
|
+
# modelpool=CLIPVisionModelPool/clip-vit-large-patch14_individual \
|
|
5
|
+
# modelpool.base_model=${MODEL_PATH}
|
|
6
|
+
# ...
|
|
1
7
|
defaults:
|
|
2
8
|
- CLIPVisionModelPool@: _template
|
|
3
|
-
|
|
4
|
-
|
|
9
|
+
|
|
10
|
+
models:
|
|
11
|
+
_pretrained_:
|
|
12
|
+
_target_: transformers.CLIPVisionModel.from_pretrained
|
|
13
|
+
pretrained_model_name_or_path: ${...base_model}
|
|
14
|
+
|
|
5
15
|
processor:
|
|
6
16
|
_target_: transformers.CLIPProcessor.from_pretrained
|
|
7
|
-
pretrained_model_name_or_path:
|
|
17
|
+
pretrained_model_name_or_path: ${..base_model}
|
|
18
|
+
|
|
19
|
+
base_model: openai/clip-vit-large-patch14
|