fusion-bench 0.2.7__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195) hide show
  1. fusion_bench/compat/method/base_algorithm.py +1 -1
  2. fusion_bench/dataset/clip_dataset.py +3 -0
  3. fusion_bench/dataset/fer2013.py +12 -0
  4. fusion_bench/dataset/llama/preference_700k.py +1 -1
  5. fusion_bench/method/__init__.py +2 -0
  6. fusion_bench/method/classification/clip_finetune.py +10 -13
  7. fusion_bench/method/surgery/__init__.py +1 -3
  8. fusion_bench/method/surgery/clip_layer_wise_adamerging_surgery.py +1 -1
  9. fusion_bench/method/tall_mask/__init__.py +0 -0
  10. fusion_bench/method/tall_mask/utils.py +234 -0
  11. fusion_bench/method/task_singular_vector/TSVC.py +16 -0
  12. fusion_bench/method/task_singular_vector/TSVM.py +63 -0
  13. fusion_bench/method/task_singular_vector/__init__.py +9 -0
  14. fusion_bench/method/task_singular_vector/utils/TSVC_utils.py +50 -0
  15. fusion_bench/method/task_singular_vector/utils/TSVM_utils.py +642 -0
  16. fusion_bench/method/task_singular_vector/utils/__init__.py +7 -0
  17. fusion_bench/method/ties_merging/ties_merging_utils.py +7 -2
  18. fusion_bench/mixins/clip_classification.py +6 -6
  19. fusion_bench/mixins/lightning_fabric.py +3 -1
  20. fusion_bench/modelpool/base_pool.py +0 -1
  21. fusion_bench/modelpool/clip_vision/modelpool.py +92 -8
  22. fusion_bench/models/surgery/__init__.py +1 -0
  23. fusion_bench/models/surgery/surgerymodelwrapper.py +2 -1
  24. fusion_bench/models/wrappers/layer_wise_fusion.py +1 -1
  25. fusion_bench/models/wrappers/task_wise_fusion.py +1 -1
  26. fusion_bench/programs/fabric_fusion_program.py +7 -4
  27. fusion_bench/taskpool/llama/reward_model.py +1 -1
  28. fusion_bench/tasks/clip_classification/__init__.py +13 -45
  29. fusion_bench/tasks/clip_classification/clip_dataset.py +1 -16
  30. fusion_bench/tasks/clip_classification/cub_200_2011.py +208 -0
  31. fusion_bench/tasks/clip_classification/emnist_letters.py +31 -0
  32. fusion_bench/tasks/clip_classification/emnist_mnist.py +5 -0
  33. fusion_bench/tasks/clip_classification/fashion_mnist.py +18 -0
  34. fusion_bench/tasks/clip_classification/fer2013.py +18 -0
  35. fusion_bench/tasks/clip_classification/food101.py +105 -0
  36. fusion_bench/tasks/clip_classification/kmnist.py +17 -0
  37. fusion_bench/tasks/clip_classification/mongo_leaf_disease.py +19 -0
  38. fusion_bench/tasks/clip_classification/pcam.py +5 -0
  39. fusion_bench/utils/parameters.py +12 -3
  40. fusion_bench/utils/type.py +10 -1
  41. {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/METADATA +1 -1
  42. {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/RECORD +195 -62
  43. fusion_bench_config/dataset/image_classification/README.md +6 -0
  44. fusion_bench_config/dataset/image_classification/test/TALL14.yaml +20 -0
  45. fusion_bench_config/dataset/image_classification/test/TALL20.yaml +28 -0
  46. fusion_bench_config/dataset/image_classification/test/cifar10.yaml +1 -1
  47. fusion_bench_config/dataset/image_classification/test/cifar100.yaml +1 -1
  48. fusion_bench_config/dataset/image_classification/test/cub-200-2011.yaml +4 -0
  49. fusion_bench_config/dataset/image_classification/test/emnist_letters.yaml +5 -0
  50. fusion_bench_config/dataset/image_classification/test/emnist_mnist.yaml +4 -0
  51. fusion_bench_config/dataset/image_classification/test/fashion_mnist.yaml +4 -0
  52. fusion_bench_config/dataset/image_classification/test/fer2013.yaml +3 -0
  53. fusion_bench_config/dataset/image_classification/test/food101.yaml +4 -0
  54. fusion_bench_config/dataset/image_classification/test/kmnist.yaml +4 -0
  55. fusion_bench_config/dataset/image_classification/test/mango-leaf-disease.yaml +4 -0
  56. fusion_bench_config/dataset/image_classification/test/oxford-iiit-pet.yaml +4 -0
  57. fusion_bench_config/dataset/image_classification/test/oxford_flowers102.yaml +4 -0
  58. fusion_bench_config/dataset/image_classification/test/pcam.yaml +4 -0
  59. fusion_bench_config/dataset/image_classification/test/rendered-sst2.yaml +4 -0
  60. fusion_bench_config/dataset/image_classification/test/stl10.yaml +4 -0
  61. fusion_bench_config/dataset/image_classification/train/TALL14.yaml +20 -0
  62. fusion_bench_config/dataset/image_classification/train/TALL20.yaml +28 -0
  63. fusion_bench_config/dataset/image_classification/train/cifar10.yaml +1 -1
  64. fusion_bench_config/dataset/image_classification/train/cifar100.yaml +1 -1
  65. fusion_bench_config/dataset/image_classification/train/cub-200-2011.yaml +4 -0
  66. fusion_bench_config/dataset/image_classification/train/emnist_letters.yaml +4 -0
  67. fusion_bench_config/dataset/image_classification/train/emnist_mnist.yaml +4 -0
  68. fusion_bench_config/dataset/image_classification/train/fashion_mnist.yaml +4 -0
  69. fusion_bench_config/dataset/image_classification/train/fer2013.yaml +3 -0
  70. fusion_bench_config/dataset/image_classification/train/food101.yaml +4 -0
  71. fusion_bench_config/dataset/image_classification/train/kmnist.yaml +4 -0
  72. fusion_bench_config/dataset/image_classification/train/mango-leaf-disease.yaml +4 -0
  73. fusion_bench_config/dataset/image_classification/train/oxford-iiit-pet.yaml +4 -0
  74. fusion_bench_config/dataset/image_classification/train/oxford_flowers102.yaml +4 -0
  75. fusion_bench_config/dataset/image_classification/train/pcam.yaml +4 -0
  76. fusion_bench_config/dataset/image_classification/train/rendered-sst2.yaml +4 -0
  77. fusion_bench_config/dataset/image_classification/train/stl10.yaml +4 -0
  78. fusion_bench_config/method/task_singular_vector/TaskSingularVectorMerging.yaml +2 -0
  79. fusion_bench_config/model/clip-vit/README.md +38 -0
  80. fusion_bench_config/model/clip-vit/clip-vit-base-patch16.yaml +1 -3
  81. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_TALL14.yaml +22 -0
  82. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_TALL20.yaml +29 -0
  83. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_cifar10.yaml +1 -0
  84. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_cifar100.yaml +1 -0
  85. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_dtd.yaml +1 -3
  86. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_emnist_letters.yaml +1 -0
  87. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_eurosat.yaml +1 -3
  88. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_fashion_mnist.yaml +1 -0
  89. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_fer2013.yaml +1 -0
  90. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_food101.yaml +1 -0
  91. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_gtsrb.yaml +1 -3
  92. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_kmnist.yaml +1 -0
  93. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_mnist.yaml +1 -3
  94. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_oxford-iiit-pet.yaml +1 -0
  95. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_oxford_flowers102.yaml +1 -0
  96. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_pcam.yaml +1 -0
  97. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_rendered-sst2.yaml +1 -0
  98. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_resisc45.yaml +1 -3
  99. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_stanford-cars.yaml +1 -3
  100. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_stl10.yaml +1 -0
  101. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_sun397.yaml +1 -3
  102. fusion_bench_config/model/clip-vit/clip-vit-base-patch16_svhn.yaml +1 -3
  103. fusion_bench_config/model/clip-vit/clip-vit-base-patch32.yaml +1 -3
  104. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_TALL14.yaml +22 -0
  105. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_TALL20.yaml +29 -0
  106. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_cifar10.yaml +1 -0
  107. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_cifar100.yaml +1 -0
  108. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_dtd.yaml +1 -3
  109. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_eight_tasks.yaml +1 -0
  110. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_emnist_letters.yaml +1 -0
  111. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_eurosat.yaml +1 -3
  112. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_fashion_mnist.yaml +1 -0
  113. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_fer2013.yaml +1 -0
  114. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_food101.yaml +1 -0
  115. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_gtsrb.yaml +1 -3
  116. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_kmnist.yaml +1 -0
  117. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_mnist.yaml +1 -3
  118. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_oxford-iiit-pet.yaml +1 -0
  119. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_oxford_flowers102.yaml +1 -0
  120. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_pcam.yaml +1 -0
  121. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_rendered-sst2.yaml +1 -0
  122. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_resisc45.yaml +1 -3
  123. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_stanford-cars.yaml +1 -3
  124. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_stl10.yaml +1 -0
  125. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_sun397.yaml +1 -3
  126. fusion_bench_config/model/clip-vit/clip-vit-base-patch32_svhn.yaml +1 -3
  127. fusion_bench_config/model/clip-vit/clip-vit-large-patch14.yaml +1 -3
  128. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_TALL14.yaml +22 -0
  129. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_TALL20.yaml +29 -0
  130. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_cifar10.yaml +1 -0
  131. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_cifar100.yaml +1 -0
  132. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_dtd.yaml +1 -3
  133. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_emnist_letters.yaml +1 -0
  134. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_eurosat.yaml +1 -3
  135. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_fashion_mnist.yaml +1 -0
  136. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_fer2013.yaml +1 -0
  137. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_food101.yaml +1 -0
  138. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_gtsrb.yaml +1 -3
  139. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_kmnist.yaml +1 -0
  140. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_mnist.yaml +1 -3
  141. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_oxford-iiit-pet.yaml +1 -0
  142. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_oxford_flowers102.yaml +1 -0
  143. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_pcam.yaml +1 -0
  144. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_rendered-sst2.yaml +1 -0
  145. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_resisc45.yaml +1 -3
  146. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_stanford-cars.yaml +1 -3
  147. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_stl10.yaml +1 -0
  148. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_sun397.yaml +1 -3
  149. fusion_bench_config/model/clip-vit/clip-vit-large-patch14_svhn.yaml +1 -3
  150. fusion_bench_config/model/clip-vit/download_TALL20_models.sh +6 -0
  151. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TA8_model_only.yaml +6 -0
  152. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14.yaml +11 -0
  153. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL14_model_only.yaml +9 -0
  154. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20.yaml +11 -0
  155. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20_model_only.yaml +9 -0
  156. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_individual.yaml +15 -3
  157. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL14.yaml +8 -0
  158. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL14_model_only.yaml +6 -0
  159. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL20.yaml +8 -0
  160. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL20_model_only.yaml +6 -0
  161. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_individual.yaml +9 -3
  162. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_single_task_projection.yaml +15 -0
  163. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL14.yaml +11 -0
  164. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL14_model_only.yaml +9 -0
  165. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20.yaml +11 -0
  166. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20_model_only.yaml +9 -0
  167. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_individual.yaml +15 -3
  168. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-base-patch32_robustness_corrupted.yaml +27 -0
  169. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TALL14.yaml +19 -0
  170. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TALL20.yaml +26 -0
  171. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_cifar10.yaml +3 -0
  172. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_cifar100.yaml +3 -0
  173. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_dtd.yaml +3 -0
  174. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_emnist_letters.yaml +3 -0
  175. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_eurosat.yaml +3 -0
  176. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_fashion_mnist.yaml +3 -0
  177. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_fer2013.yaml +3 -0
  178. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_food101.yaml +3 -0
  179. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_gtsrb.yaml +3 -0
  180. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_kmnist.yaml +3 -0
  181. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_mnist.yaml +3 -0
  182. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_oxford-iiit-pet.yaml +3 -0
  183. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_oxford_flowers102.yaml +3 -0
  184. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_oxford_flowers102_val.yaml +3 -0
  185. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_pcam.yaml +3 -0
  186. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_rendered-sst2.yaml +3 -0
  187. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_resisc45.yaml +3 -0
  188. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_stanford-cars.yaml +3 -0
  189. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_stl10.yaml +3 -0
  190. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_sun397.yaml +3 -0
  191. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-single-task_svhn.yaml +3 -0
  192. {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/LICENSE +0 -0
  193. {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/WHEEL +0 -0
  194. {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/entry_points.txt +0 -0
  195. {fusion_bench-0.2.7.dist-info → fusion_bench-0.2.8.dist-info}/top_level.txt +0 -0
@@ -0,0 +1 @@
1
+ oxford_flowers102: tanganke/clip-vit-large-patch14_oxford_flowers102
@@ -0,0 +1 @@
1
+ pcam: tanganke/clip-vit-large-patch14_pcam
@@ -0,0 +1 @@
1
+ rendered-sst2: tanganke/clip-vit-large-patch14_rendered-sst2
@@ -1,3 +1 @@
1
- resisc45:
2
- _target_: transformers.CLIPVisionModel.from_pretrained
3
- pretrained_model_name_or_path: tanganke/clip-vit-large-patch14_resisc45
1
+ resisc45: tanganke/clip-vit-large-patch14_resisc45
@@ -1,3 +1 @@
1
- stanford-cars:
2
- _target_: transformers.CLIPVisionModel.from_pretrained
3
- pretrained_model_name_or_path: tanganke/clip-vit-large-patch14_stanford-cars
1
+ stanford-cars: tanganke/clip-vit-large-patch14_stanford-cars
@@ -0,0 +1 @@
1
+ stl10: tanganke/clip-vit-large-patch14_stl10
@@ -1,3 +1 @@
1
- sun397:
2
- _target_: transformers.CLIPVisionModel.from_pretrained
3
- pretrained_model_name_or_path: tanganke/clip-vit-large-patch14_sun397
1
+ sun397: tanganke/clip-vit-large-patch14_sun397
@@ -1,3 +1 @@
1
- svhn:
2
- _target_: transformers.CLIPVisionModel.from_pretrained
3
- pretrained_model_name_or_path: tanganke/clip-vit-large-patch14_svhn
1
+ svhn: tanganke/clip-vit-large-patch14_svhn
@@ -0,0 +1,6 @@
1
+ #! /bin/bash
2
+ for MODEL in clip-vit-base-patch32 clip-vit-base-patch16 clip-vit-large-patch14; do
3
+ for TASK in sun397 stanford-cars resisc45 eurosat svhn gtsrb mnist dtd oxford_flowers102 pcam fer2013 oxford-iiit-pet stl10 cifar100 cifar10 food101 fashion_mnist emnist_letters kmnist rendered-sst2; do
4
+ huggingface-cli download --local-dir tanganke/${MODEL}_${TASK} tanganke/${MODEL}_${TASK}
5
+ done
6
+ done
@@ -0,0 +1,6 @@
1
+ defaults:
2
+ - CLIPVisionModelPool@: _template
3
+ - /model/clip-vit@models: clip-vit-base-patch16_eight_tasks
4
+ processor:
5
+ _target_: transformers.CLIPProcessor.from_pretrained
6
+ pretrained_model_name_or_path: openai/clip-vit-base-patch16
@@ -0,0 +1,11 @@
1
+ # The 14 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-base-patch16_TALL14
7
+ - /dataset/image_classification/train@train_datasets: TALL14
8
+ - /dataset/image_classification/test@test_datasets: TALL14
9
+ processor:
10
+ _target_: transformers.CLIPProcessor.from_pretrained
11
+ pretrained_model_name_or_path: openai/clip-vit-base-patch16
@@ -0,0 +1,9 @@
1
+ # The 14 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-base-patch16_TALL14
7
+ processor:
8
+ _target_: transformers.CLIPProcessor.from_pretrained
9
+ pretrained_model_name_or_path: openai/clip-vit-base-patch16
@@ -0,0 +1,11 @@
1
+ # The 20 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-base-patch16_TALL20
7
+ - /dataset/image_classification/train@train_datasets: TALL20
8
+ - /dataset/image_classification/test@test_datasets: TALL20
9
+ processor:
10
+ _target_: transformers.CLIPProcessor.from_pretrained
11
+ pretrained_model_name_or_path: openai/clip-vit-base-patch16
@@ -0,0 +1,9 @@
1
+ # The 20 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-base-patch16_TALL20
7
+ processor:
8
+ _target_: transformers.CLIPProcessor.from_pretrained
9
+ pretrained_model_name_or_path: openai/clip-vit-base-patch16
@@ -1,7 +1,19 @@
1
+ # This is useful for evaluating the performance of a single clip vision model
2
+ #
3
+ # fusion_bench \
4
+ # modelpool=CLIPVisionModelPool/clip-vit-base-patch16_individual \
5
+ # modelpool.base_model=${MODEL_PATH}
6
+ # ...
1
7
  defaults:
2
8
  - CLIPVisionModelPool@: _template
3
- - /model/clip-vit@models:
4
- - clip-vit-base-patch16
9
+
10
+ models:
11
+ _pretrained_:
12
+ _target_: transformers.CLIPVisionModel.from_pretrained
13
+ pretrained_model_name_or_path: ${...base_model}
14
+
5
15
  processor:
6
16
  _target_: transformers.CLIPProcessor.from_pretrained
7
- pretrained_model_name_or_path: openai/clip-vit-base-patch16
17
+ pretrained_model_name_or_path: ${..base_model}
18
+
19
+ base_model: openai/clip-vit-base-patch16
@@ -0,0 +1,8 @@
1
+ # The 14 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-base-patch32_TALL14
7
+ - /dataset/image_classification/train@train_datasets: TALL14
8
+ - /dataset/image_classification/test@test_datasets: TALL14
@@ -0,0 +1,6 @@
1
+ # The 14 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-base-patch32_TALL14
@@ -0,0 +1,8 @@
1
+ # The 20 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-base-patch32_TALL20
7
+ - /dataset/image_classification/train@train_datasets: TALL20
8
+ - /dataset/image_classification/test@test_datasets: TALL20
@@ -0,0 +1,6 @@
1
+ # The 20 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-base-patch32_TALL20
@@ -1,7 +1,13 @@
1
1
  defaults:
2
2
  - CLIPVisionModelPool@: _template
3
- - /model/clip-vit@models:
4
- - clip-vit-base-patch32
3
+
4
+ models:
5
+ _pretrained_:
6
+ _target_: transformers.CLIPVisionModel.from_pretrained
7
+ pretrained_model_name_or_path: ${...base_model}
8
+
5
9
  processor:
6
10
  _target_: transformers.CLIPProcessor.from_pretrained
7
- pretrained_model_name_or_path: openai/clip-vit-base-patch32
11
+ pretrained_model_name_or_path: ${..base_model}
12
+
13
+ base_model: openai/clip-vit-base-patch32
@@ -0,0 +1,15 @@
1
+ defaults:
2
+ - /model/clip-vit@models:
3
+ - clip-vit-base-patch32
4
+ - clip-vit-base-patch32_sun397
5
+ - clip-vit-base-patch32_stanford-cars
6
+
7
+ _target_: fusion_bench.modelpool.CLIPVisionModelPool
8
+ _recursive_: false
9
+
10
+ train_datasets: null
11
+ test_datasets: null
12
+
13
+ processor:
14
+ _target_: transformers.CLIPProcessor.from_pretrained
15
+ pretrained_model_name_or_path: openai/clip-vit-base-patch32
@@ -0,0 +1,11 @@
1
+ # The 14 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-large-patch14_TALL14
7
+ - /dataset/image_classification/train@train_datasets: TALL14
8
+ - /dataset/image_classification/test@test_datasets: TALL14
9
+ processor:
10
+ _target_: transformers.CLIPProcessor.from_pretrained
11
+ pretrained_model_name_or_path: openai/clip-vit-large-patch14
@@ -0,0 +1,9 @@
1
+ # The 14 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-large-patch14_TALL14
7
+ processor:
8
+ _target_: transformers.CLIPProcessor.from_pretrained
9
+ pretrained_model_name_or_path: openai/clip-vit-large-patch14
@@ -0,0 +1,11 @@
1
+ # The 20 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-large-patch14_TALL20
7
+ - /dataset/image_classification/train@train_datasets: TALL20
8
+ - /dataset/image_classification/test@test_datasets: TALL20
9
+ processor:
10
+ _target_: transformers.CLIPProcessor.from_pretrained
11
+ pretrained_model_name_or_path: openai/clip-vit-large-patch14
@@ -0,0 +1,9 @@
1
+ # The 20 tasks used in the paper:
2
+ # Wang et al. Localizing Task Information for Improved Model Merging and Compression
3
+ # http://arxiv.org/abs/2405.07813
4
+ defaults:
5
+ - CLIPVisionModelPool@: _template
6
+ - /model/clip-vit@models: clip-vit-large-patch14_TALL20
7
+ processor:
8
+ _target_: transformers.CLIPProcessor.from_pretrained
9
+ pretrained_model_name_or_path: openai/clip-vit-large-patch14
@@ -1,7 +1,19 @@
1
+ # This is useful for evaluating the performance of a single clip vision model
2
+ #
3
+ # fusion_bench \
4
+ # modelpool=CLIPVisionModelPool/clip-vit-large-patch14_individual \
5
+ # modelpool.base_model=${MODEL_PATH}
6
+ # ...
1
7
  defaults:
2
8
  - CLIPVisionModelPool@: _template
3
- - /model/clip-vit@models:
4
- - clip-vit-large-patch14
9
+
10
+ models:
11
+ _pretrained_:
12
+ _target_: transformers.CLIPVisionModel.from_pretrained
13
+ pretrained_model_name_or_path: ${...base_model}
14
+
5
15
  processor:
6
16
  _target_: transformers.CLIPProcessor.from_pretrained
7
- pretrained_model_name_or_path: openai/clip-vit-large-patch14
17
+ pretrained_model_name_or_path: ${..base_model}
18
+
19
+ base_model: openai/clip-vit-large-patch14
@@ -0,0 +1,27 @@
1
+ type: clip_vit_classification
2
+ name: clip-vit-robustness_clean
3
+ # corruption can be one of:
4
+ # contrast, gaussian_noise, impulse_noise, jpeg_compression, motion_blur, pixelate, spatter
5
+ corruption: ${corruption}
6
+ dataset_type: huggingface_image_classification
7
+ tasks:
8
+ - name: stanford_cars
9
+ dataset:
10
+ name: tanganke/stanford_cars
11
+ split: ${taskpool.corruption}
12
+ - name: eurosat
13
+ dataset:
14
+ name: tanganke/eurosat
15
+ split: ${taskpool.corruption}
16
+ - name: resisc45
17
+ dataset:
18
+ name: tanganke/resisc45
19
+ split: ${taskpool.corruption}
20
+ - name: gtsrb
21
+ dataset:
22
+ name: tanganke/gtsrb
23
+ split: ${taskpool.corruption}
24
+ clip_model: openai/clip-vit-base-patch32
25
+ batch_size: 128
26
+ num_workers: 16
27
+ fast_dev_run: ${fast_dev_run}
@@ -0,0 +1,19 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets:
4
+ # eight tasks in the task arithmetic paper
5
+ - sun397
6
+ - stanford-cars
7
+ - resisc45
8
+ - eurosat
9
+ - svhn
10
+ - gtsrb
11
+ - mnist
12
+ - dtd
13
+ # additional 6 tasks in the TALL mask paper (TALL 14)
14
+ - oxford_flowers102
15
+ - pcam
16
+ - fer2013
17
+ - oxford-iiit-pet
18
+ - stl10
19
+ - cifar100
@@ -0,0 +1,26 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets:
4
+ # eight tasks in the task arithmetic paper
5
+ - sun397
6
+ - stanford-cars
7
+ - resisc45
8
+ - eurosat
9
+ - svhn
10
+ - gtsrb
11
+ - mnist
12
+ - dtd
13
+ # additional 6 tasks in the TALL mask paper (TALL 14)
14
+ - oxford_flowers102
15
+ - pcam
16
+ - fer2013
17
+ - oxford-iiit-pet
18
+ - stl10
19
+ - cifar100
20
+ # additional 6 tasks in the TALL mask paper (TALL 20)
21
+ - cifar10
22
+ - food101
23
+ - fashion_mnist
24
+ - emnist_letters
25
+ - kmnist
26
+ - rendered-sst2
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: cifar10
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: cifar100
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: dtd
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: emnist_letters
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: eurosat
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: fashion_mnist
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: fer2013
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: food101
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: gtsrb
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: kmnist
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: mnist
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: oxford-iiit-pet
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: oxford_flowers102
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/val@test_datasets: oxford_flowers102
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: pcam
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: rendered-sst2
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: resisc45
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: stanford-cars
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: stl10
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: sun397
@@ -0,0 +1,3 @@
1
+ defaults:
2
+ - CLIPVisionModelTaskPool@: _template
3
+ - /dataset/image_classification/test@test_datasets: svhn