fusion-bench 0.2.7__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in one of the supported public registries. It is provided for informational purposes only.
- fusion_bench/compat/method/base_algorithm.py +1 -1
- fusion_bench/dataset/clip_dataset.py +3 -0
- fusion_bench/dataset/fer2013.py +12 -0
- fusion_bench/dataset/llama/preference_700k.py +1 -1
- fusion_bench/method/__init__.py +2 -0
- fusion_bench/method/classification/clip_finetune.py +10 -13
- fusion_bench/method/surgery/__init__.py +1 -3
- fusion_bench/method/surgery/clip_layer_wise_adamerging_surgery.py +1 -1
- fusion_bench/method/tall_mask/__init__.py +0 -0
- fusion_bench/method/tall_mask/utils.py +234 -0
- fusion_bench/method/task_singular_vector/TSVC.py +16 -0
- fusion_bench/method/task_singular_vector/TSVM.py +63 -0
- fusion_bench/method/task_singular_vector/__init__.py +9 -0
- fusion_bench/method/task_singular_vector/utils/TSVC_utils.py +50 -0
- fusion_bench/method/task_singular_vector/utils/TSVM_utils.py +642 -0
- fusion_bench/method/task_singular_vector/utils/__init__.py +7 -0
- fusion_bench/method/ties_merging/ties_merging_utils.py +7 -2
- fusion_bench/mixins/clip_classification.py +6 -6
- fusion_bench/mixins/lightning_fabric.py +3 -1
- fusion_bench/modelpool/base_pool.py +0 -1
- fusion_bench/modelpool/clip_vision/modelpool.py +92 -8
- fusion_bench/models/surgery/__init__.py +1 -0
- fusion_bench/models/surgery/surgerymodelwrapper.py +2 -1
- fusion_bench/models/wrappers/layer_wise_fusion.py +1 -1
- fusion_bench/models/wrappers/task_wise_fusion.py +1 -1
- fusion_bench/programs/fabric_fusion_program.py +7 -4
- fusion_bench/taskpool/llama/reward_model.py +1 -1
- fusion_bench/tasks/clip_classification/__init__.py +13 -45
- fusion_bench/tasks/clip_classification/clip_dataset.py +1 -16
- fusion_bench/tasks/clip_classification/cub_200_2011.py +208 -0
- fusion_bench/tasks/clip_classification/emnist_letters.py +31 -0
- fusion_bench/tasks/clip_classification/emnist_mnist.py +5 -0
- fusion_bench/tasks/clip_classification/fashion_mnist.py +18 -0
- fusion_bench/tasks/clip_classification/fer2013.py +18 -0
- fusion_bench/tasks/clip_classification/food101.py +105 -0
- fusion_bench/tasks/clip_classification/kmnist.py +17 -0
- fusion_bench/tasks/clip_classification/mongo_leaf_disease.py +19 -0
- fusion_bench/tasks/clip_classification/pcam.py +5 -0
- fusion_bench/utils/parameters.py +12 -3
- fusion_bench/utils/type.py +10 -1
- {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/METADATA +1 -1
- {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/RECORD +195 -62
- fusion_bench_config/dataset/image_classification/README.md +6 -0
- fusion_bench_config/dataset/image_classification/test/TALL14.yaml +20 -0
- fusion_bench_config/dataset/image_classification/test/TALL20.yaml +28 -0
- fusion_bench_config/dataset/image_classification/test/cifar10.yaml +1 -1
- fusion_bench_config/dataset/image_classification/test/cifar100.yaml +1 -1
- fusion_bench_config/dataset/image_classification/test/cub-200-2011.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/emnist_letters.yaml +5 -0
- fusion_bench_config/dataset/image_classification/test/emnist_mnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/fashion_mnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/fer2013.yaml +3 -0
- fusion_bench_config/dataset/image_classification/test/food101.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/kmnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/mango-leaf-disease.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/oxford-iiit-pet.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/oxford_flowers102.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/pcam.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/rendered-sst2.yaml +4 -0
- fusion_bench_config/dataset/image_classification/test/stl10.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/TALL14.yaml +20 -0
- fusion_bench_config/dataset/image_classification/train/TALL20.yaml +28 -0
- fusion_bench_config/dataset/image_classification/train/cifar10.yaml +1 -1
- fusion_bench_config/dataset/image_classification/train/cifar100.yaml +1 -1
- fusion_bench_config/dataset/image_classification/train/cub-200-2011.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/emnist_letters.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/emnist_mnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/fashion_mnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/fer2013.yaml +3 -0
- fusion_bench_config/dataset/image_classification/train/food101.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/kmnist.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/mango-leaf-disease.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/oxford-iiit-pet.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/oxford_flowers102.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/pcam.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/rendered-sst2.yaml +4 -0
- fusion_bench_config/dataset/image_classification/train/stl10.yaml +4 -0
- fusion_bench_config/method/task_singular_vector/TaskSingularVectorMerging.yaml +2 -0
- fusion_bench_config/model/clip-vit/README.md +38 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_TALL14.yaml +22 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_TALL20.yaml +29 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_cifar10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_cifar100.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_dtd.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_emnist_letters.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_eurosat.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_fashion_mnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_fer2013.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_food101.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_gtsrb.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_kmnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_mnist.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_oxford-iiit-pet.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_oxford_flowers102.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_pcam.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_rendered-sst2.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_resisc45.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_stanford-cars.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_stl10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_sun397.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch16_svhn.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_TALL14.yaml +22 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_TALL20.yaml +29 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_cifar10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_cifar100.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_dtd.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_eight_tasks.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_emnist_letters.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_eurosat.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_fashion_mnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_fer2013.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_food101.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_gtsrb.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_kmnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_mnist.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_oxford-iiit-pet.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_oxford_flowers102.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_pcam.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_rendered-sst2.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_resisc45.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_stanford-cars.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_stl10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_sun397.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-base-patch32_svhn.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_TALL14.yaml +22 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_TALL20.yaml +29 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_cifar10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_cifar100.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_dtd.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_emnist_letters.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_eurosat.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_fashion_mnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_fer2013.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_food101.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_gtsrb.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_kmnist.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_mnist.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_oxford-iiit-pet.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_oxford_flowers102.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_pcam.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_rendered-sst2.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_resisc45.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_stanford-cars.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_stl10.yaml +1 -0
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_sun397.yaml +1 -3
- fusion_bench_config/model/clip-vit/clip-vit-large-patch14_svhn.yaml +1 -3
- fusion_bench_config/model/clip-vit/download_TALL20_models.sh +6 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TA8_model_only.yaml +6 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14.yaml +11 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14_model_only.yaml +9 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20.yaml +11 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20_model_only.yaml +9 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_individual.yaml +15 -3
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL14.yaml +8 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL14_model_only.yaml +6 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL20.yaml +8 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL20_model_only.yaml +6 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_individual.yaml +9 -3
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_single_task_projection.yaml +15 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL14.yaml +11 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL14_model_only.yaml +9 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20.yaml +11 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20_model_only.yaml +9 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_individual.yaml +15 -3
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-base-patch32_robustness_corrupted.yaml +27 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TALL14.yaml +19 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TALL20.yaml +26 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_cifar10.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_cifar100.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_dtd.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_emnist_letters.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_eurosat.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_fashion_mnist.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_fer2013.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_food101.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_gtsrb.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_kmnist.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_mnist.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_oxford-iiit-pet.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_oxford_flowers102.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_oxford_flowers102_val.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_pcam.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_rendered-sst2.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_resisc45.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_stanford-cars.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_stl10.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_sun397.yaml +3 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_svhn.yaml +3 -0
- {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/LICENSE +0 -0
- {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/WHEEL +0 -0
- {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/entry_points.txt +0 -0
- {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/top_level.txt +0 -0
fusion_bench_config/model/clip-vit/clip-vit-large-patch14_oxford_flowers102.yaml
ADDED
@@ -0,0 +1 @@
+oxford_flowers102: tanganke/clip-vit-large-patch14_oxford_flowers102

fusion_bench_config/model/clip-vit/clip-vit-large-patch14_pcam.yaml
ADDED
@@ -0,0 +1 @@
+pcam: tanganke/clip-vit-large-patch14_pcam

fusion_bench_config/model/clip-vit/clip-vit-large-patch14_rendered-sst2.yaml
ADDED
@@ -0,0 +1 @@
+rendered-sst2: tanganke/clip-vit-large-patch14_rendered-sst2

fusion_bench_config/model/clip-vit/clip-vit-large-patch14_stl10.yaml
ADDED
@@ -0,0 +1 @@
+stl10: tanganke/clip-vit-large-patch14_stl10

fusion_bench_config/model/clip-vit/download_TALL20_models.sh
ADDED
@@ -0,0 +1,6 @@
+#! /bin/bash
+for MODEL in clip-vit-base-patch32 clip-vit-base-patch16 clip-vit-large-patch14; do
+    for TASK in sun397 stanford-cars resisc45 eurosat svhn gtsrb mnist dtd oxford_flowers102 pcam fer2013 oxford-iiit-pet stl10 cifar100 cifar10 food101 fashion_mnist emnist_letters kmnist rendered-sst2; do
+        huggingface-cli download --local-dir tanganke/${MODEL}_${TASK} tanganke/${MODEL}_${TASK}
+    done
+done
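
For reference, a rough Python equivalent of the loop above, assuming the huggingface_hub package (which also provides the huggingface-cli tool) is installed; the repo ids mirror the shell script:

# Sketch: Python equivalent of the shell download loop above.
from huggingface_hub import snapshot_download

MODELS = ["clip-vit-base-patch32", "clip-vit-base-patch16", "clip-vit-large-patch14"]
TASKS = [
    "sun397", "stanford-cars", "resisc45", "eurosat", "svhn", "gtsrb", "mnist", "dtd",
    "oxford_flowers102", "pcam", "fer2013", "oxford-iiit-pet", "stl10", "cifar100",
    "cifar10", "food101", "fashion_mnist", "emnist_letters", "kmnist", "rendered-sst2",
]
for model in MODELS:
    for task in TASKS:
        repo_id = f"tanganke/{model}_{task}"
        # mirrors: huggingface-cli download --local-dir <repo_id> <repo_id>
        snapshot_download(repo_id=repo_id, local_dir=repo_id)
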

fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14.yaml
ADDED
@@ -0,0 +1,11 @@
+# The 14 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-base-patch16_TALL14
+  - /dataset/image_classification/train@train_datasets: TALL14
+  - /dataset/image_classification/test@test_datasets: TALL14
+processor:
+  _target_: transformers.CLIPProcessor.from_pretrained
+  pretrained_model_name_or_path: openai/clip-vit-base-patch16
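
To inspect how a pool config like this composes, one could load it with Hydra's compose API; a minimal sketch, assuming a local checkout with fusion_bench_config as the config root (the directory path below is hypothetical):

# Sketch: compose the TALL14 model pool config outside the fusion_bench CLI.
from hydra import compose, initialize_config_dir

with initialize_config_dir(config_dir="/path/to/fusion_bench_config", version_base=None):
    cfg = compose(config_name="modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14")

# The defaults list fills cfg.models with the 14 task-specific models and
# cfg.train_datasets / cfg.test_datasets with the TALL14 dataset lists.
print(cfg.processor.pretrained_model_name_or_path)  # openai/clip-vit-base-patch16
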
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14_model_only.yaml
ADDED
@@ -0,0 +1,9 @@
+# The 14 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-base-patch16_TALL14
+processor:
+  _target_: transformers.CLIPProcessor.from_pretrained
+  pretrained_model_name_or_path: openai/clip-vit-base-patch16

fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20.yaml
ADDED
@@ -0,0 +1,11 @@
+# The 20 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-base-patch16_TALL20
+  - /dataset/image_classification/train@train_datasets: TALL20
+  - /dataset/image_classification/test@test_datasets: TALL20
+processor:
+  _target_: transformers.CLIPProcessor.from_pretrained
+  pretrained_model_name_or_path: openai/clip-vit-base-patch16
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20_model_only.yaml
ADDED
@@ -0,0 +1,9 @@
+# The 20 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-base-patch16_TALL20
+processor:
+  _target_: transformers.CLIPProcessor.from_pretrained
+  pretrained_model_name_or_path: openai/clip-vit-base-patch16

fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_individual.yaml
@@ -1,7 +1,19 @@
+# This is useful for evaluating the performance of a single CLIP vision model
+#
+# fusion_bench \
+#   modelpool=CLIPVisionModelPool/clip-vit-base-patch16_individual \
+#   modelpool.base_model=${MODEL_PATH}
+#   ...
 defaults:
   - CLIPVisionModelPool@: _template
-
-
+
+models:
+  _pretrained_:
+    _target_: transformers.CLIPVisionModel.from_pretrained
+    pretrained_model_name_or_path: ${...base_model}
+
 processor:
   _target_: transformers.CLIPProcessor.from_pretrained
-  pretrained_model_name_or_path:
+  pretrained_model_name_or_path: ${..base_model}
+
+base_model: openai/clip-vit-base-patch16
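
The ${..base_model} and ${...base_model} values are OmegaConf relative interpolations: each dot after the first walks one level up from the node holding the value, so both resolve to the top-level base_model key and the whole pool can be retargeted by overriding that single value. A minimal sketch of the pattern:

# Sketch: how the relative interpolations above resolve.
from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "models": {
        "_pretrained_": {
            # three dots: resolve against the grandparent node (the config root)
            "pretrained_model_name_or_path": "${...base_model}",
        }
    },
    "processor": {
        # two dots: resolve against the parent node (the config root)
        "pretrained_model_name_or_path": "${..base_model}",
    },
    "base_model": "openai/clip-vit-base-patch16",
})
print(cfg.models._pretrained_.pretrained_model_name_or_path)  # openai/clip-vit-base-patch16
print(cfg.processor.pretrained_model_name_or_path)            # openai/clip-vit-base-patch16
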

fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL14.yaml
ADDED
@@ -0,0 +1,8 @@
+# The 14 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-base-patch32_TALL14
+  - /dataset/image_classification/train@train_datasets: TALL14
+  - /dataset/image_classification/test@test_datasets: TALL14

fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL20.yaml
ADDED
@@ -0,0 +1,8 @@
+# The 20 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-base-patch32_TALL20
+  - /dataset/image_classification/train@train_datasets: TALL20
+  - /dataset/image_classification/test@test_datasets: TALL20

fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_individual.yaml
@@ -1,7 +1,13 @@
 defaults:
   - CLIPVisionModelPool@: _template
-
-
+
+models:
+  _pretrained_:
+    _target_: transformers.CLIPVisionModel.from_pretrained
+    pretrained_model_name_or_path: ${...base_model}
+
 processor:
   _target_: transformers.CLIPProcessor.from_pretrained
-  pretrained_model_name_or_path:
+  pretrained_model_name_or_path: ${..base_model}
+
+base_model: openai/clip-vit-base-patch32
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_single_task_projection.yaml
ADDED
@@ -0,0 +1,15 @@
+defaults:
+  - /model/clip-vit@models:
+      - clip-vit-base-patch32
+      - clip-vit-base-patch32_sun397
+      - clip-vit-base-patch32_stanford-cars
+
+_target_: fusion_bench.modelpool.CLIPVisionModelPool
+_recursive_: false
+
+train_datasets: null
+test_datasets: null
+
+processor:
+  _target_: transformers.CLIPProcessor.from_pretrained
+  pretrained_model_name_or_path: openai/clip-vit-base-patch32
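
The _target_/_recursive_: false pair is Hydra's standard instantiation pattern: instantiate() builds the class named by _target_, while _recursive_: false leaves nested nodes (here, the per-model configs) as raw DictConfigs so they can be built lazily. A self-contained sketch with a hypothetical stand-in class:

# Sketch: _recursive_: false defers instantiation of nested _target_ nodes.
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf

class Pool:  # hypothetical stand-in for fusion_bench.modelpool.CLIPVisionModelPool
    def __init__(self, models, processor=None, train_datasets=None, test_datasets=None):
        # The nested _target_ under `models` has NOT been called yet.
        assert isinstance(models, DictConfig)
        self.models = models

cfg = OmegaConf.create({
    "_target_": "__main__.Pool",
    "_recursive_": False,
    "models": {"_pretrained_": {"_target_": "transformers.CLIPVisionModel.from_pretrained"}},
    "train_datasets": None,
    "test_datasets": None,
})
pool = instantiate(cfg)  # builds Pool only; the CLIP weights are not loaded here
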

fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL14.yaml
ADDED
@@ -0,0 +1,11 @@
+# The 14 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-large-patch14_TALL14
+  - /dataset/image_classification/train@train_datasets: TALL14
+  - /dataset/image_classification/test@test_datasets: TALL14
+processor:
+  _target_: transformers.CLIPProcessor.from_pretrained
+  pretrained_model_name_or_path: openai/clip-vit-large-patch14
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL14_model_only.yaml
ADDED
@@ -0,0 +1,9 @@
+# The 14 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-large-patch14_TALL14
+processor:
+  _target_: transformers.CLIPProcessor.from_pretrained
+  pretrained_model_name_or_path: openai/clip-vit-large-patch14

fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20.yaml
ADDED
@@ -0,0 +1,11 @@
+# The 20 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-large-patch14_TALL20
+  - /dataset/image_classification/train@train_datasets: TALL20
+  - /dataset/image_classification/test@test_datasets: TALL20
+processor:
+  _target_: transformers.CLIPProcessor.from_pretrained
+  pretrained_model_name_or_path: openai/clip-vit-large-patch14
fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20_model_only.yaml
ADDED
@@ -0,0 +1,9 @@
+# The 20 tasks used in the paper:
+# Wang et al. Localizing Task Information for Improved Model Merging and Compression
+# http://arxiv.org/abs/2405.07813
+defaults:
+  - CLIPVisionModelPool@: _template
+  - /model/clip-vit@models: clip-vit-large-patch14_TALL20
+processor:
+  _target_: transformers.CLIPProcessor.from_pretrained
+  pretrained_model_name_or_path: openai/clip-vit-large-patch14

fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_individual.yaml
@@ -1,7 +1,19 @@
+# This is useful for evaluating the performance of a single CLIP vision model
+#
+# fusion_bench \
+#   modelpool=CLIPVisionModelPool/clip-vit-large-patch14_individual \
+#   modelpool.base_model=${MODEL_PATH}
+#   ...
 defaults:
   - CLIPVisionModelPool@: _template
-
-
+
+models:
+  _pretrained_:
+    _target_: transformers.CLIPVisionModel.from_pretrained
+    pretrained_model_name_or_path: ${...base_model}
+
 processor:
   _target_: transformers.CLIPProcessor.from_pretrained
-  pretrained_model_name_or_path:
+  pretrained_model_name_or_path: ${..base_model}
+
+base_model: openai/clip-vit-large-patch14
fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-base-patch32_robustness_corrupted.yaml
ADDED
@@ -0,0 +1,27 @@
+type: clip_vit_classification
+name: clip-vit-robustness_clean
+# corruption can be one of:
+# contrast, gaussian_noise, impulse_noise, jpeg_compression, motion_blur, pixelate, spatter
+corruption: ${corruption}
+dataset_type: huggingface_image_classification
+tasks:
+  - name: stanford_cars
+    dataset:
+      name: tanganke/stanford_cars
+      split: ${taskpool.corruption}
+  - name: eurosat
+    dataset:
+      name: tanganke/eurosat
+      split: ${taskpool.corruption}
+  - name: resisc45
+    dataset:
+      name: tanganke/resisc45
+      split: ${taskpool.corruption}
+  - name: gtsrb
+    dataset:
+      name: tanganke/gtsrb
+      split: ${taskpool.corruption}
+clip_model: openai/clip-vit-base-patch32
+batch_size: 128
+num_workers: 16
+fast_dev_run: ${fast_dev_run}
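
Each task's split resolves through ${taskpool.corruption} to the top-level corruption value, so a single override (e.g. corruption=gaussian_noise on the command line) switches all four datasets to the same corrupted test split. A minimal sketch of the interpolation chain, with a hypothetical corruption value:

# Sketch: one top-level value drives every task's dataset split.
from omegaconf import OmegaConf

root = OmegaConf.create({
    "corruption": "gaussian_noise",  # hypothetical override value
    "taskpool": {
        "corruption": "${corruption}",
        "tasks": [
            {"name": "gtsrb",
             "dataset": {"name": "tanganke/gtsrb", "split": "${taskpool.corruption}"}},
        ],
    },
})
print(root.taskpool.tasks[0].dataset.split)  # gaussian_noise
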
fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TALL14.yaml
ADDED
@@ -0,0 +1,19 @@
+defaults:
+  - CLIPVisionModelTaskPool@: _template
+  - /dataset/image_classification/test@test_datasets:
+      # eight tasks in the task arithmetic paper
+      - sun397
+      - stanford-cars
+      - resisc45
+      - eurosat
+      - svhn
+      - gtsrb
+      - mnist
+      - dtd
+      # additional 6 tasks in the TALL mask paper (TALL 14)
+      - oxford_flowers102
+      - pcam
+      - fer2013
+      - oxford-iiit-pet
+      - stl10
+      - cifar100

fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TALL20.yaml
ADDED
@@ -0,0 +1,26 @@
+defaults:
+  - CLIPVisionModelTaskPool@: _template
+  - /dataset/image_classification/test@test_datasets:
+      # eight tasks in the task arithmetic paper
+      - sun397
+      - stanford-cars
+      - resisc45
+      - eurosat
+      - svhn
+      - gtsrb
+      - mnist
+      - dtd
+      # additional 6 tasks in the TALL mask paper (TALL 14)
+      - oxford_flowers102
+      - pcam
+      - fer2013
+      - oxford-iiit-pet
+      - stl10
+      - cifar100
+      # additional 6 tasks in the TALL mask paper (TALL 20)
+      - cifar10
+      - food101
+      - fashion_mnist
+      - emnist_letters
+      - kmnist
+      - rendered-sst2

{fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/LICENSE
File without changes

{fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/WHEEL
File without changes

{fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/entry_points.txt
File without changes

{fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/top_level.txt
File without changes