mteb 2.5.2__py3-none-any.whl → 2.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (241)
  1. mteb/__init__.py +2 -0
  2. mteb/_create_dataloaders.py +17 -18
  3. mteb/_evaluators/any_sts_evaluator.py +3 -3
  4. mteb/_evaluators/clustering_evaluator.py +2 -2
  5. mteb/_evaluators/evaluator.py +4 -2
  6. mteb/_evaluators/image/imagetext_pairclassification_evaluator.py +10 -8
  7. mteb/_evaluators/pair_classification_evaluator.py +5 -3
  8. mteb/_evaluators/retrieval_evaluator.py +2 -2
  9. mteb/_evaluators/retrieval_metrics.py +18 -17
  10. mteb/_evaluators/sklearn_evaluator.py +11 -10
  11. mteb/_evaluators/text/bitext_mining_evaluator.py +27 -18
  12. mteb/_evaluators/text/summarization_evaluator.py +23 -18
  13. mteb/_evaluators/zeroshot_classification_evaluator.py +5 -3
  14. mteb/abstasks/_data_filter/filters.py +1 -1
  15. mteb/abstasks/_data_filter/task_pipelines.py +3 -0
  16. mteb/abstasks/_statistics_calculation.py +18 -10
  17. mteb/abstasks/_stratification.py +18 -18
  18. mteb/abstasks/abstask.py +35 -28
  19. mteb/abstasks/aggregate_task_metadata.py +1 -9
  20. mteb/abstasks/aggregated_task.py +10 -29
  21. mteb/abstasks/classification.py +15 -10
  22. mteb/abstasks/clustering.py +19 -15
  23. mteb/abstasks/clustering_legacy.py +10 -10
  24. mteb/abstasks/image/image_text_pair_classification.py +7 -4
  25. mteb/abstasks/multilabel_classification.py +23 -19
  26. mteb/abstasks/pair_classification.py +20 -11
  27. mteb/abstasks/regression.py +4 -4
  28. mteb/abstasks/retrieval.py +28 -24
  29. mteb/abstasks/retrieval_dataset_loaders.py +2 -2
  30. mteb/abstasks/sts.py +8 -5
  31. mteb/abstasks/task_metadata.py +31 -33
  32. mteb/abstasks/text/bitext_mining.py +39 -28
  33. mteb/abstasks/text/reranking.py +8 -6
  34. mteb/abstasks/text/summarization.py +10 -5
  35. mteb/abstasks/zeroshot_classification.py +8 -4
  36. mteb/benchmarks/benchmark.py +4 -2
  37. mteb/benchmarks/benchmarks/__init__.py +4 -0
  38. mteb/benchmarks/benchmarks/benchmarks.py +112 -11
  39. mteb/benchmarks/get_benchmark.py +14 -55
  40. mteb/cache.py +182 -29
  41. mteb/cli/_display_tasks.py +2 -2
  42. mteb/cli/build_cli.py +110 -14
  43. mteb/cli/generate_model_card.py +43 -23
  44. mteb/deprecated_evaluator.py +63 -49
  45. mteb/descriptive_stats/Image/DocumentUnderstanding/KoVidore2CybersecurityRetrieval.json +32 -0
  46. mteb/descriptive_stats/Image/DocumentUnderstanding/KoVidore2EconomicRetrieval.json +32 -0
  47. mteb/descriptive_stats/Image/DocumentUnderstanding/KoVidore2EnergyRetrieval.json +32 -0
  48. mteb/descriptive_stats/Image/DocumentUnderstanding/KoVidore2HrRetrieval.json +32 -0
  49. mteb/descriptive_stats/Retrieval/ChemRxivRetrieval.json +30 -0
  50. mteb/descriptive_stats/Retrieval/EuroPIRQRetrieval.json +116 -0
  51. mteb/descriptive_stats/Retrieval/NanoClimateFEVER-VN.json +30 -0
  52. mteb/descriptive_stats/Retrieval/NanoDBPedia-VN.json +30 -0
  53. mteb/descriptive_stats/Retrieval/NanoFEVER-VN.json +30 -0
  54. mteb/descriptive_stats/Retrieval/NanoHotpotQA-VN.json +30 -0
  55. mteb/descriptive_stats/Retrieval/NanoMSMARCO-VN.json +30 -0
  56. mteb/descriptive_stats/Retrieval/NanoNQ-VN.json +30 -0
  57. mteb/descriptive_stats/Retrieval/TVPLRetrieval.json +30 -0
  58. mteb/evaluate.py +44 -33
  59. mteb/filter_tasks.py +25 -26
  60. mteb/get_tasks.py +29 -30
  61. mteb/languages/language_scripts.py +5 -3
  62. mteb/leaderboard/app.py +162 -34
  63. mteb/load_results.py +12 -12
  64. mteb/models/abs_encoder.py +10 -6
  65. mteb/models/cache_wrappers/cache_backend_protocol.py +3 -5
  66. mteb/models/cache_wrappers/cache_backends/_hash_utils.py +5 -4
  67. mteb/models/cache_wrappers/cache_backends/faiss_cache.py +6 -2
  68. mteb/models/cache_wrappers/cache_backends/numpy_cache.py +43 -25
  69. mteb/models/cache_wrappers/cache_wrapper.py +2 -2
  70. mteb/models/get_model_meta.py +21 -3
  71. mteb/models/instruct_wrapper.py +28 -8
  72. mteb/models/model_implementations/align_models.py +1 -1
  73. mteb/models/model_implementations/andersborges.py +4 -4
  74. mteb/models/model_implementations/ara_models.py +1 -1
  75. mteb/models/model_implementations/arctic_models.py +8 -8
  76. mteb/models/model_implementations/b1ade_models.py +1 -1
  77. mteb/models/model_implementations/bge_models.py +45 -21
  78. mteb/models/model_implementations/bica_model.py +3 -3
  79. mteb/models/model_implementations/blip2_models.py +2 -2
  80. mteb/models/model_implementations/blip_models.py +16 -16
  81. mteb/models/model_implementations/bm25.py +4 -4
  82. mteb/models/model_implementations/bmretriever_models.py +6 -4
  83. mteb/models/model_implementations/cadet_models.py +1 -1
  84. mteb/models/model_implementations/cde_models.py +11 -4
  85. mteb/models/model_implementations/clip_models.py +6 -6
  86. mteb/models/model_implementations/clips_models.py +3 -3
  87. mteb/models/model_implementations/codefuse_models.py +5 -5
  88. mteb/models/model_implementations/codesage_models.py +3 -3
  89. mteb/models/model_implementations/cohere_models.py +5 -5
  90. mteb/models/model_implementations/cohere_v.py +2 -2
  91. mteb/models/model_implementations/colpali_models.py +3 -3
  92. mteb/models/model_implementations/colqwen_models.py +8 -8
  93. mteb/models/model_implementations/colsmol_models.py +2 -2
  94. mteb/models/model_implementations/conan_models.py +1 -1
  95. mteb/models/model_implementations/dino_models.py +42 -42
  96. mteb/models/model_implementations/e5_instruct.py +23 -4
  97. mteb/models/model_implementations/e5_models.py +9 -9
  98. mteb/models/model_implementations/e5_v.py +6 -6
  99. mteb/models/model_implementations/eagerworks_models.py +1 -1
  100. mteb/models/model_implementations/emillykkejensen_models.py +6 -6
  101. mteb/models/model_implementations/en_code_retriever.py +1 -1
  102. mteb/models/model_implementations/euler_models.py +2 -2
  103. mteb/models/model_implementations/fa_models.py +9 -9
  104. mteb/models/model_implementations/facebookai.py +14 -2
  105. mteb/models/model_implementations/geogpt_models.py +1 -1
  106. mteb/models/model_implementations/gme_v_models.py +6 -5
  107. mteb/models/model_implementations/google_models.py +1 -1
  108. mteb/models/model_implementations/granite_vision_embedding_models.py +1 -1
  109. mteb/models/model_implementations/gritlm_models.py +2 -2
  110. mteb/models/model_implementations/gte_models.py +25 -13
  111. mteb/models/model_implementations/hinvec_models.py +1 -1
  112. mteb/models/model_implementations/ibm_granite_models.py +30 -6
  113. mteb/models/model_implementations/inf_models.py +2 -2
  114. mteb/models/model_implementations/jasper_models.py +2 -2
  115. mteb/models/model_implementations/jina_clip.py +48 -10
  116. mteb/models/model_implementations/jina_models.py +18 -11
  117. mteb/models/model_implementations/kblab.py +12 -6
  118. mteb/models/model_implementations/kennethenevoldsen_models.py +4 -4
  119. mteb/models/model_implementations/kfst.py +1 -1
  120. mteb/models/model_implementations/kowshik24_models.py +1 -1
  121. mteb/models/model_implementations/lgai_embedding_models.py +1 -1
  122. mteb/models/model_implementations/linq_models.py +1 -1
  123. mteb/models/model_implementations/listconranker.py +1 -1
  124. mteb/models/model_implementations/llm2clip_models.py +6 -6
  125. mteb/models/model_implementations/llm2vec_models.py +8 -8
  126. mteb/models/model_implementations/mcinext_models.py +4 -1
  127. mteb/models/model_implementations/mdbr_models.py +17 -3
  128. mteb/models/model_implementations/misc_models.py +68 -68
  129. mteb/models/model_implementations/mixedbread_ai_models.py +332 -0
  130. mteb/models/model_implementations/mme5_models.py +1 -1
  131. mteb/models/model_implementations/moco_models.py +4 -4
  132. mteb/models/model_implementations/mod_models.py +1 -1
  133. mteb/models/model_implementations/model2vec_models.py +14 -14
  134. mteb/models/model_implementations/moka_models.py +1 -1
  135. mteb/models/model_implementations/nbailab.py +3 -3
  136. mteb/models/model_implementations/no_instruct_sentence_models.py +2 -2
  137. mteb/models/model_implementations/nomic_models.py +30 -15
  138. mteb/models/model_implementations/nomic_models_vision.py +1 -1
  139. mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py +15 -9
  140. mteb/models/model_implementations/nvidia_models.py +151 -19
  141. mteb/models/model_implementations/octen_models.py +61 -2
  142. mteb/models/model_implementations/openclip_models.py +13 -13
  143. mteb/models/model_implementations/opensearch_neural_sparse_models.py +5 -5
  144. mteb/models/model_implementations/ops_moa_models.py +1 -1
  145. mteb/models/model_implementations/ordalietech_solon_embeddings_mini_beta_1_1.py +1 -1
  146. mteb/models/model_implementations/pawan_models.py +1 -1
  147. mteb/models/model_implementations/piccolo_models.py +1 -1
  148. mteb/models/model_implementations/pixie_models.py +56 -0
  149. mteb/models/model_implementations/promptriever_models.py +4 -4
  150. mteb/models/model_implementations/pylate_models.py +10 -9
  151. mteb/models/model_implementations/qodo_models.py +2 -2
  152. mteb/models/model_implementations/qtack_models.py +1 -1
  153. mteb/models/model_implementations/qwen3_models.py +3 -3
  154. mteb/models/model_implementations/qzhou_models.py +2 -2
  155. mteb/models/model_implementations/random_baseline.py +3 -3
  156. mteb/models/model_implementations/rasgaard_models.py +2 -2
  157. mteb/models/model_implementations/reasonir_model.py +1 -1
  158. mteb/models/model_implementations/repllama_models.py +3 -3
  159. mteb/models/model_implementations/rerankers_custom.py +12 -6
  160. mteb/models/model_implementations/rerankers_monot5_based.py +17 -17
  161. mteb/models/model_implementations/richinfoai_models.py +1 -1
  162. mteb/models/model_implementations/ru_sentence_models.py +20 -20
  163. mteb/models/model_implementations/ruri_models.py +10 -10
  164. mteb/models/model_implementations/salesforce_models.py +3 -3
  165. mteb/models/model_implementations/samilpwc_models.py +1 -1
  166. mteb/models/model_implementations/sarashina_embedding_models.py +2 -2
  167. mteb/models/model_implementations/searchmap_models.py +1 -1
  168. mteb/models/model_implementations/seed_1_6_embedding_models_1215.py +113 -146
  169. mteb/models/model_implementations/sentence_transformers_models.py +124 -22
  170. mteb/models/model_implementations/shuu_model.py +1 -1
  171. mteb/models/model_implementations/siglip_models.py +20 -20
  172. mteb/models/model_implementations/slm_models.py +416 -0
  173. mteb/models/model_implementations/spartan8806_atles_champion.py +1 -1
  174. mteb/models/model_implementations/stella_models.py +17 -4
  175. mteb/models/model_implementations/tarka_models.py +2 -2
  176. mteb/models/model_implementations/text2vec_models.py +9 -3
  177. mteb/models/model_implementations/ua_sentence_models.py +1 -1
  178. mteb/models/model_implementations/uae_models.py +7 -1
  179. mteb/models/model_implementations/vdr_models.py +1 -1
  180. mteb/models/model_implementations/vi_vn_models.py +6 -6
  181. mteb/models/model_implementations/vlm2vec_models.py +3 -3
  182. mteb/models/model_implementations/voyage_models.py +84 -0
  183. mteb/models/model_implementations/voyage_v.py +9 -7
  184. mteb/models/model_implementations/youtu_models.py +1 -1
  185. mteb/models/model_implementations/yuan_models.py +1 -1
  186. mteb/models/model_implementations/yuan_models_en.py +1 -1
  187. mteb/models/model_meta.py +80 -31
  188. mteb/models/models_protocols.py +22 -6
  189. mteb/models/search_encoder_index/search_indexes/faiss_search_index.py +9 -6
  190. mteb/models/search_wrappers.py +33 -18
  191. mteb/models/sentence_transformer_wrapper.py +50 -25
  192. mteb/models/vllm_wrapper.py +327 -0
  193. mteb/py.typed +0 -0
  194. mteb/results/benchmark_results.py +29 -21
  195. mteb/results/model_result.py +52 -22
  196. mteb/results/task_result.py +80 -58
  197. mteb/similarity_functions.py +11 -7
  198. mteb/tasks/classification/dan/dk_hate_classification.py +1 -1
  199. mteb/tasks/classification/est/estonian_valence.py +1 -1
  200. mteb/tasks/classification/kur/kurdish_sentiment_classification.py +2 -2
  201. mteb/tasks/classification/multilingual/scala_classification.py +1 -1
  202. mteb/tasks/clustering/eng/hume_wiki_cities_clustering.py +1 -1
  203. mteb/tasks/clustering/eng/wiki_cities_clustering.py +1 -1
  204. mteb/tasks/clustering/zho/cmteb_clustering.py +2 -2
  205. mteb/tasks/image_text_pair_classification/eng/sugar_crepe.py +1 -1
  206. mteb/tasks/reranking/multilingual/wikipedia_reranking_multilingual.py +1 -1
  207. mteb/tasks/retrieval/code/code_rag.py +12 -12
  208. mteb/tasks/retrieval/dan/dan_fever_retrieval.py +1 -1
  209. mteb/tasks/retrieval/dan/tv2_nordretrieval.py +2 -2
  210. mteb/tasks/retrieval/dan/twitter_hjerne_retrieval.py +2 -2
  211. mteb/tasks/retrieval/eng/__init__.py +2 -0
  212. mteb/tasks/retrieval/eng/chemrxiv.py +33 -0
  213. mteb/tasks/retrieval/eng/cub200_i2i_retrieval.py +1 -1
  214. mteb/tasks/retrieval/kor/__init__.py +15 -1
  215. mteb/tasks/retrieval/kor/kovidore2_bench_retrieval.py +142 -0
  216. mteb/tasks/retrieval/multilingual/__init__.py +2 -0
  217. mteb/tasks/retrieval/multilingual/euro_pirq_retrieval.py +43 -0
  218. mteb/tasks/retrieval/multilingual/vidore3_bench_retrieval.py +90 -100
  219. mteb/tasks/retrieval/nob/norquad.py +2 -2
  220. mteb/tasks/retrieval/nob/snl_retrieval.py +2 -2
  221. mteb/tasks/retrieval/tur/tur_hist_quad.py +1 -1
  222. mteb/tasks/retrieval/vie/__init__.py +14 -6
  223. mteb/tasks/retrieval/vie/climate_fevervn_retrieval.py +39 -0
  224. mteb/tasks/retrieval/vie/db_pedia_vn_retrieval.py +39 -0
  225. mteb/tasks/retrieval/vie/fevervn_retrieval.py +39 -0
  226. mteb/tasks/retrieval/vie/hotpot_qavn_retrieval.py +39 -0
  227. mteb/tasks/retrieval/vie/msmarcovn_retrieval.py +48 -0
  228. mteb/tasks/retrieval/vie/nqvn_retrieval.py +39 -0
  229. mteb/tasks/retrieval/vie/tvpl_retrieval.py +42 -0
  230. mteb/tasks/retrieval/vie/zac_legal_text_retrieval.py +15 -1
  231. mteb/types/__init__.py +2 -0
  232. mteb/types/_encoder_io.py +12 -0
  233. mteb/types/_result.py +2 -1
  234. mteb/types/statistics.py +9 -3
  235. {mteb-2.5.2.dist-info → mteb-2.7.2.dist-info}/METADATA +15 -4
  236. {mteb-2.5.2.dist-info → mteb-2.7.2.dist-info}/RECORD +240 -219
  237. mteb/models/model_implementations/mxbai_models.py +0 -111
  238. {mteb-2.5.2.dist-info → mteb-2.7.2.dist-info}/WHEEL +0 -0
  239. {mteb-2.5.2.dist-info → mteb-2.7.2.dist-info}/entry_points.txt +0 -0
  240. {mteb-2.5.2.dist-info → mteb-2.7.2.dist-info}/licenses/LICENSE +0 -0
  241. {mteb-2.5.2.dist-info → mteb-2.7.2.dist-info}/top_level.txt +0 -0
@@ -35,7 +35,7 @@ codesage_large = ModelMeta(
35
35
  open_weights=True,
36
36
  public_training_code=None,
37
37
  public_training_data=None,
38
- framework=["PyTorch"],
38
+ framework=["PyTorch", "Transformers"],
39
39
  reference="https://huggingface.co/codesage/codesage-large-v2",
40
40
  similarity_fn_name=ScoringFunction.COSINE,
41
41
  use_instructions=False,
@@ -62,7 +62,7 @@ codesage_base = ModelMeta(
62
62
  open_weights=True,
63
63
  public_training_code=None,
64
64
  public_training_data=None,
65
- framework=["PyTorch"],
65
+ framework=["PyTorch", "Transformers"],
66
66
  reference="https://huggingface.co/codesage/codesage-base-v2",
67
67
  similarity_fn_name=ScoringFunction.COSINE,
68
68
  use_instructions=False,
@@ -89,7 +89,7 @@ codesage_small = ModelMeta(
89
89
  open_weights=True,
90
90
  public_training_code=None,
91
91
  public_training_data=None,
92
- framework=["PyTorch"],
92
+ framework=["PyTorch", "Transformers"],
93
93
  reference="https://huggingface.co/codesage/codesage-small-v2",
94
94
  similarity_fn_name=ScoringFunction.COSINE,
95
95
  use_instructions=False,
@@ -222,7 +222,7 @@ class CohereTextEmbeddingModel(AbsEncoder):
222
222
  ) -> None:
223
223
  requires_package(self, "cohere", model_name, "pip install 'mteb[cohere]'")
224
224
 
225
- import cohere # type: ignore
225
+ import cohere
226
226
 
227
227
  self.model_name = model_name.removeprefix("Cohere/Cohere-")
228
228
  self.sep = sep
@@ -392,7 +392,7 @@ cohere_mult_3 = ModelMeta(
392
392
  reference="https://cohere.com/blog/introducing-embed-v3",
393
393
  license=None,
394
394
  similarity_fn_name=ScoringFunction.COSINE,
395
- framework=["API"],
395
+ framework=["API", "Transformers"],
396
396
  use_instructions=True,
397
397
  public_training_code=None,
398
398
  public_training_data=None, # assumed
@@ -417,7 +417,7 @@ cohere_eng_3 = ModelMeta(
417
417
  embed_dim=1024,
418
418
  license=None,
419
419
  similarity_fn_name=ScoringFunction.COSINE,
420
- framework=["API"],
420
+ framework=["API", "Transformers"],
421
421
  use_instructions=True,
422
422
  public_training_code=None,
423
423
  public_training_data=None, # assumed
@@ -442,7 +442,7 @@ cohere_mult_light_3 = ModelMeta(
442
442
  embed_dim=384,
443
443
  license=None,
444
444
  similarity_fn_name=ScoringFunction.COSINE,
445
- framework=["API"],
445
+ framework=["API", "Transformers"],
446
446
  use_instructions=True,
447
447
  public_training_code=None,
448
448
  public_training_data=None, # assumed
@@ -467,7 +467,7 @@ cohere_eng_light_3 = ModelMeta(
467
467
  embed_dim=384,
468
468
  license=None,
469
469
  similarity_fn_name=ScoringFunction.COSINE,
470
- framework=["API"],
470
+ framework=["API", "Transformers"],
471
471
  use_instructions=True,
472
472
  public_training_code=None,
473
473
  public_training_data=None, # assumed
@@ -378,7 +378,7 @@ def cohere_v_loader(model_name, **kwargs):
378
378
 
379
379
 
380
380
  cohere_mult_3 = ModelMeta(
381
- loader=cohere_v_loader, # type: ignore
381
+ loader=cohere_v_loader,
382
382
  loader_kwargs={"model_name": "embed-multilingual-v3.0"},
383
383
  name="cohere/embed-multilingual-v3.0",
384
384
  model_type=["dense"],
@@ -402,7 +402,7 @@ cohere_mult_3 = ModelMeta(
402
402
  )
403
403
 
404
404
  cohere_eng_3 = ModelMeta(
405
- loader=cohere_v_loader, # type: ignore
405
+ loader=cohere_v_loader,
406
406
  loader_kwargs={"model_name": "embed-english-v3.0"},
407
407
  name="cohere/embed-english-v3.0",
408
408
  model_type=["dense"],
@@ -226,7 +226,7 @@ colpali_v1_1 = ModelMeta(
226
226
  open_weights=True,
227
227
  public_training_code="https://github.com/illuin-tech/colpali",
228
228
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
229
- framework=["ColPali"],
229
+ framework=["ColPali", "safetensors"],
230
230
  reference="https://huggingface.co/vidore/colpali-v1.1",
231
231
  similarity_fn_name=ScoringFunction.MAX_SIM,
232
232
  use_instructions=True,
@@ -253,7 +253,7 @@ colpali_v1_2 = ModelMeta(
253
253
  open_weights=True,
254
254
  public_training_code="https://github.com/illuin-tech/colpali",
255
255
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
256
- framework=["ColPali"],
256
+ framework=["ColPali", "safetensors"],
257
257
  reference="https://huggingface.co/vidore/colpali-v1.2",
258
258
  similarity_fn_name=ScoringFunction.MAX_SIM,
259
259
  use_instructions=True,
@@ -280,7 +280,7 @@ colpali_v1_3 = ModelMeta(
280
280
  open_weights=True,
281
281
  public_training_code="https://github.com/illuin-tech/colpali",
282
282
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
283
- framework=["ColPali"],
283
+ framework=["ColPali", "safetensors"],
284
284
  reference="https://huggingface.co/vidore/colpali-v1.3",
285
285
  similarity_fn_name=ScoringFunction.MAX_SIM,
286
286
  use_instructions=True,
@@ -226,7 +226,7 @@ colqwen2 = ModelMeta(
226
226
  open_weights=True,
227
227
  public_training_code="https://github.com/illuin-tech/colpali",
228
228
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
229
- framework=["ColPali"],
229
+ framework=["ColPali", "safetensors"],
230
230
  reference="https://huggingface.co/vidore/colqwen2-v1.0",
231
231
  similarity_fn_name="MaxSim",
232
232
  use_instructions=True,
@@ -253,7 +253,7 @@ colqwen2_5 = ModelMeta(
253
253
  open_weights=True,
254
254
  public_training_code="https://github.com/illuin-tech/colpali",
255
255
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
256
- framework=["ColPali"],
256
+ framework=["ColPali", "safetensors"],
257
257
  reference="https://huggingface.co/vidore/colqwen2.5-v0.2",
258
258
  similarity_fn_name="MaxSim",
259
259
  use_instructions=True,
@@ -297,7 +297,7 @@ colqwen3_8b = ModelMeta(
297
297
  open_weights=True,
298
298
  public_training_code="https://github.com/illuin-tech/colpali",
299
299
  public_training_data=None,
300
- framework=["PyTorch"],
300
+ framework=["PyTorch", "Transformers", "safetensors"],
301
301
  reference="https://huggingface.co/TomoroAI/tomoro-colqwen3-embed-8b",
302
302
  similarity_fn_name=ScoringFunction.MAX_SIM,
303
303
  use_instructions=True,
@@ -321,7 +321,7 @@ colqwen3_4b = ModelMeta(
321
321
  open_weights=True,
322
322
  public_training_code="https://github.com/illuin-tech/colpali",
323
323
  public_training_data=None,
324
- framework=["PyTorch"],
324
+ framework=["PyTorch", "Transformers", "safetensors"],
325
325
  reference="https://huggingface.co/TomoroAI/tomoro-colqwen3-embed-4b",
326
326
  similarity_fn_name=ScoringFunction.MAX_SIM,
327
327
  use_instructions=True,
@@ -348,7 +348,7 @@ colnomic_7b = ModelMeta(
348
348
  open_weights=True,
349
349
  public_training_code="https://github.com/nomic-ai/colpali",
350
350
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
351
- framework=["ColPali"],
351
+ framework=["ColPali", "safetensors"],
352
352
  reference="https://huggingface.co/nomic-ai/colnomic-embed-multimodal-7b",
353
353
  similarity_fn_name="MaxSim",
354
354
  use_instructions=True,
@@ -393,7 +393,7 @@ colnomic_3b = ModelMeta(
393
393
  open_weights=True,
394
394
  public_training_code="https://github.com/nomic-ai/colpali",
395
395
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
396
- framework=["ColPali"],
396
+ framework=["ColPali", "safetensors"],
397
397
  reference="https://huggingface.co/nomic-ai/colnomic-embed-multimodal-3b",
398
398
  similarity_fn_name="MaxSim",
399
399
  use_instructions=True,
@@ -458,7 +458,7 @@ evoqwen25_vl_retriever_3b_v1 = ModelMeta(
458
458
  open_weights=True,
459
459
  public_training_code="https://github.com/illuin-tech/colpali",
460
460
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
461
- framework=["ColPali"],
461
+ framework=["ColPali", "safetensors"],
462
462
  reference="https://huggingface.co/ApsaraStackMaaS/EvoQwen2.5-VL-Retriever-3B-v1",
463
463
  similarity_fn_name="MaxSim",
464
464
  use_instructions=True,
@@ -484,7 +484,7 @@ evoqwen25_vl_retriever_7b_v1 = ModelMeta(
484
484
  open_weights=True,
485
485
  public_training_code="https://github.com/illuin-tech/colpali",
486
486
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
487
- framework=["ColPali"],
487
+ framework=["ColPali", "safetensors"],
488
488
  reference="https://huggingface.co/ApsaraStackMaaS/EvoQwen2.5-VL-Retriever-7B-v1",
489
489
  similarity_fn_name="MaxSim",
490
490
  use_instructions=True,
@@ -67,7 +67,7 @@ colsmol_256m = ModelMeta(
67
67
  open_weights=True,
68
68
  public_training_code="https://github.com/illuin-tech/colpali",
69
69
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
70
- framework=["ColPali"],
70
+ framework=["ColPali", "safetensors"],
71
71
  reference="https://huggingface.co/vidore/colSmol-256M",
72
72
  similarity_fn_name="MaxSim",
73
73
  use_instructions=True,
@@ -94,7 +94,7 @@ colsmol_500m = ModelMeta(
94
94
  open_weights=True,
95
95
  public_training_code="https://github.com/illuin-tech/colpali",
96
96
  public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
97
- framework=["ColPali"],
97
+ framework=["ColPali", "safetensors"],
98
98
  reference="https://huggingface.co/vidore/colSmol-500M",
99
99
  similarity_fn_name="MaxSim",
100
100
  use_instructions=True,
@@ -209,7 +209,7 @@ Conan_embedding_v2 = ModelMeta(
209
209
  license="apache-2.0",
210
210
  reference="https://huggingface.co/TencentBAC/Conan-embedding-v2",
211
211
  similarity_fn_name="cosine",
212
- framework=["API"],
212
+ framework=["API", "Sentence Transformers", "Transformers"],
213
213
  use_instructions=True,
214
214
  training_datasets=E5_MISTRAL_TRAINING_DATA | bge_full_data | conan_zh_datasets,
215
215
  public_training_code=None,
@@ -104,7 +104,7 @@ dinov2_training_datasets = set(
104
104
 
105
105
 
106
106
  dinov2_small = ModelMeta(
107
- loader=DINOModel, # type: ignore
107
+ loader=DINOModel,
108
108
  name="facebook/dinov2-small",
109
109
  model_type=["dense"],
110
110
  languages=["eng-Latn"],
@@ -119,13 +119,13 @@ dinov2_small = ModelMeta(
119
119
  open_weights=True,
120
120
  public_training_code="https://github.com/facebookresearch/dinov2",
121
121
  public_training_data=None,
122
- framework=["PyTorch"],
122
+ framework=["PyTorch", "Transformers", "safetensors"],
123
123
  reference="https://huggingface.co/facebook/dinov2-small",
124
124
  similarity_fn_name=ScoringFunction.COSINE,
125
125
  use_instructions=False,
126
126
  training_datasets=dinov2_training_datasets,
127
127
  citation="""@misc{oquab2023dinov2,
128
- title={DINOv2: Learning Robust Visual Features without Supervision},
128
+ title={DINOv2: Learning Robust Visual Features without Supervision},
129
129
  author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
130
130
  year={2023},
131
131
  eprint={2304.07193},
@@ -135,7 +135,7 @@ dinov2_small = ModelMeta(
135
135
  )
136
136
 
137
137
  dinov2_base = ModelMeta(
138
- loader=DINOModel, # type: ignore
138
+ loader=DINOModel,
139
139
  name="facebook/dinov2-base",
140
140
  model_type=["dense"],
141
141
  languages=["eng-Latn"],
@@ -150,13 +150,13 @@ dinov2_base = ModelMeta(
150
150
  open_weights=True,
151
151
  public_training_code="https://github.com/facebookresearch/dinov2",
152
152
  public_training_data=None,
153
- framework=["PyTorch"],
153
+ framework=["PyTorch", "Transformers", "safetensors"],
154
154
  reference="https://huggingface.co/facebook/dinov2-base",
155
155
  similarity_fn_name=ScoringFunction.COSINE,
156
156
  use_instructions=False,
157
157
  training_datasets=dinov2_training_datasets,
158
158
  citation="""@misc{oquab2023dinov2,
159
- title={DINOv2: Learning Robust Visual Features without Supervision},
159
+ title={DINOv2: Learning Robust Visual Features without Supervision},
160
160
  author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
161
161
  year={2023},
162
162
  eprint={2304.07193},
@@ -166,7 +166,7 @@ dinov2_base = ModelMeta(
166
166
  )
167
167
 
168
168
  dinov2_large = ModelMeta(
169
- loader=DINOModel, # type: ignore
169
+ loader=DINOModel,
170
170
  name="facebook/dinov2-large",
171
171
  model_type=["dense"],
172
172
  languages=["eng-Latn"],
@@ -181,13 +181,13 @@ dinov2_large = ModelMeta(
181
181
  open_weights=True,
182
182
  public_training_code="https://github.com/facebookresearch/dinov2",
183
183
  public_training_data=None,
184
- framework=["PyTorch"],
184
+ framework=["PyTorch", "Transformers", "safetensors"],
185
185
  reference="https://huggingface.co/facebook/dinov2-large",
186
186
  similarity_fn_name=ScoringFunction.COSINE,
187
187
  use_instructions=False,
188
188
  training_datasets=dinov2_training_datasets,
189
189
  citation="""@misc{oquab2023dinov2,
190
- title={DINOv2: Learning Robust Visual Features without Supervision},
190
+ title={DINOv2: Learning Robust Visual Features without Supervision},
191
191
  author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
192
192
  year={2023},
193
193
  eprint={2304.07193},
@@ -197,7 +197,7 @@ dinov2_large = ModelMeta(
197
197
  )
198
198
 
199
199
  dinov2_giant = ModelMeta(
200
- loader=DINOModel, # type: ignore
200
+ loader=DINOModel,
201
201
  name="facebook/dinov2-giant",
202
202
  model_type=["dense"],
203
203
  languages=["eng-Latn"],
@@ -212,13 +212,13 @@ dinov2_giant = ModelMeta(
212
212
  open_weights=True,
213
213
  public_training_code="https://github.com/facebookresearch/dinov2",
214
214
  public_training_data=None,
215
- framework=["PyTorch"],
215
+ framework=["PyTorch", "Transformers", "safetensors"],
216
216
  reference="https://huggingface.co/facebook/dinov2-giant",
217
217
  similarity_fn_name=ScoringFunction.COSINE,
218
218
  use_instructions=False,
219
219
  training_datasets=dinov2_training_datasets,
220
220
  citation="""@misc{oquab2023dinov2,
221
- title={DINOv2: Learning Robust Visual Features without Supervision},
221
+ title={DINOv2: Learning Robust Visual Features without Supervision},
222
222
  author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
223
223
  year={2023},
224
224
  eprint={2304.07193},
@@ -247,13 +247,13 @@ webssl_dino300m_full2b = ModelMeta(
247
247
  open_weights=True,
248
248
  public_training_code="",
249
249
  public_training_data=None,
250
- framework=["PyTorch"],
250
+ framework=["PyTorch", "Transformers", "safetensors"],
251
251
  reference="https://huggingface.co/facebook/webssl-dino300m-full2b-224",
252
252
  similarity_fn_name=None,
253
253
  use_instructions=False,
254
254
  training_datasets=webssl_dino_training_datasets,
255
255
  citation="""@article{fan2025scaling,
256
- title={Scaling Language-Free Visual Representation Learning},
256
+ title={Scaling Language-Free Visual Representation Learning},
257
257
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
258
258
  year={2025},
259
259
  eprint={2504.01017},
@@ -278,13 +278,13 @@ webssl_dino1b_full2b = ModelMeta(
278
278
  open_weights=True,
279
279
  public_training_code="",
280
280
  public_training_data=None,
281
- framework=["PyTorch"],
281
+ framework=["PyTorch", "Transformers", "safetensors"],
282
282
  reference="https://huggingface.co/facebook/webssl-dino1b-full2b-224",
283
283
  similarity_fn_name=None,
284
284
  use_instructions=False,
285
285
  training_datasets=webssl_dino_training_datasets,
286
286
  citation="""@article{fan2025scaling,
287
- title={Scaling Language-Free Visual Representation Learning},
287
+ title={Scaling Language-Free Visual Representation Learning},
288
288
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
289
289
  year={2025},
290
290
  eprint={2504.01017},
@@ -309,13 +309,13 @@ webssl_dino2b_full2b = ModelMeta(
309
309
  open_weights=True,
310
310
  public_training_code="",
311
311
  public_training_data=None,
312
- framework=["PyTorch"],
312
+ framework=["PyTorch", "Transformers", "safetensors"],
313
313
  reference="https://huggingface.co/facebook/webssl-dino2b-full2b-224",
314
314
  similarity_fn_name=None,
315
315
  use_instructions=False,
316
316
  training_datasets=webssl_dino_training_datasets,
317
317
  citation="""@article{fan2025scaling,
318
- title={Scaling Language-Free Visual Representation Learning},
318
+ title={Scaling Language-Free Visual Representation Learning},
319
319
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
320
320
  year={2025},
321
321
  eprint={2504.01017},
@@ -340,13 +340,13 @@ webssl_dino3b_full2b = ModelMeta(
340
340
  open_weights=True,
341
341
  public_training_code="",
342
342
  public_training_data=None,
343
- framework=["PyTorch"],
343
+ framework=["PyTorch", "Transformers", "safetensors"],
344
344
  reference="https://huggingface.co/facebook/webssl-dino3b-full2b-224",
345
345
  similarity_fn_name=None,
346
346
  use_instructions=False,
347
347
  training_datasets=webssl_dino_training_datasets,
348
348
  citation="""@article{fan2025scaling,
349
- title={Scaling Language-Free Visual Representation Learning},
349
+ title={Scaling Language-Free Visual Representation Learning},
350
350
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
351
351
  year={2025},
352
352
  eprint={2504.01017},
@@ -371,13 +371,13 @@ webssl_dino5b_full2b = ModelMeta(
371
371
  open_weights=True,
372
372
  public_training_code="",
373
373
  public_training_data=None,
374
- framework=["PyTorch"],
374
+ framework=["PyTorch", "Transformers", "safetensors"],
375
375
  reference="https://huggingface.co/facebook/webssl-dino5b-full2b-224",
376
376
  similarity_fn_name=None,
377
377
  use_instructions=False,
378
378
  training_datasets=webssl_dino_training_datasets,
379
379
  citation="""@article{fan2025scaling,
380
- title={Scaling Language-Free Visual Representation Learning},
380
+ title={Scaling Language-Free Visual Representation Learning},
381
381
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
382
382
  year={2025},
383
383
  eprint={2504.01017},
@@ -402,13 +402,13 @@ webssl_dino7b_full8b_224 = ModelMeta(
402
402
  open_weights=True,
403
403
  public_training_code="",
404
404
  public_training_data=None,
405
- framework=["PyTorch"],
405
+ framework=["PyTorch", "Transformers", "safetensors"],
406
406
  reference="https://huggingface.co/facebook/webssl-dino7b-full8b-224",
407
407
  similarity_fn_name=None,
408
408
  use_instructions=False,
409
409
  training_datasets=webssl_dino_training_datasets,
410
410
  citation="""@article{fan2025scaling,
411
- title={Scaling Language-Free Visual Representation Learning},
411
+ title={Scaling Language-Free Visual Representation Learning},
412
412
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
413
413
  year={2025},
414
414
  eprint={2504.01017},
@@ -433,13 +433,13 @@ webssl_dino7b_full8b_378 = ModelMeta(
433
433
  open_weights=True,
434
434
  public_training_code="",
435
435
  public_training_data=None,
436
- framework=["PyTorch"],
436
+ framework=["PyTorch", "Transformers", "safetensors"],
437
437
  reference="https://huggingface.co/facebook/webssl-dino7b-full8b-378",
438
438
  similarity_fn_name=None,
439
439
  use_instructions=False,
440
440
  training_datasets=webssl_dino_training_datasets,
441
441
  citation="""@article{fan2025scaling,
442
- title={Scaling Language-Free Visual Representation Learning},
442
+ title={Scaling Language-Free Visual Representation Learning},
443
443
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
444
444
  year={2025},
445
445
  eprint={2504.01017},
@@ -464,13 +464,13 @@ webssl_dino7b_full8b_518 = ModelMeta(
464
464
  open_weights=True,
465
465
  public_training_code="",
466
466
  public_training_data=None,
467
- framework=["PyTorch"],
467
+ framework=["PyTorch", "Transformers", "safetensors"],
468
468
  reference="https://huggingface.co/facebook/webssl-dino7b-full8b-518",
469
469
  similarity_fn_name=None,
470
470
  use_instructions=False,
471
471
  training_datasets=webssl_dino_training_datasets,
472
472
  citation="""@article{fan2025scaling,
473
- title={Scaling Language-Free Visual Representation Learning},
473
+ title={Scaling Language-Free Visual Representation Learning},
474
474
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
475
475
  year={2025},
476
476
  eprint={2504.01017},
@@ -496,13 +496,13 @@ webssl_dino2b_light2b = ModelMeta(
496
496
  open_weights=True,
497
497
  public_training_code="",
498
498
  public_training_data=None,
499
- framework=["PyTorch"],
499
+ framework=["PyTorch", "Transformers", "safetensors"],
500
500
  reference="https://huggingface.co/facebook/webssl-dino2b-light2b-224",
501
501
  similarity_fn_name=None,
502
502
  use_instructions=False,
503
503
  training_datasets=webssl_dino_training_datasets,
504
504
  citation="""@article{fan2025scaling,
505
- title={Scaling Language-Free Visual Representation Learning},
505
+ title={Scaling Language-Free Visual Representation Learning},
506
506
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
507
507
  year={2025},
508
508
  eprint={2504.01017},
@@ -527,13 +527,13 @@ webssl_dino2b_heavy2b = ModelMeta(
527
527
  open_weights=True,
528
528
  public_training_code="",
529
529
  public_training_data=None,
530
- framework=["PyTorch"],
530
+ framework=["PyTorch", "Transformers", "safetensors"],
531
531
  reference="https://huggingface.co/facebook/webssl-dino2b-heavy2b-224",
532
532
  similarity_fn_name=None,
533
533
  use_instructions=False,
534
534
  training_datasets=webssl_dino_training_datasets,
535
535
  citation="""@article{fan2025scaling,
536
- title={Scaling Language-Free Visual Representation Learning},
536
+ title={Scaling Language-Free Visual Representation Learning},
537
537
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
538
538
  year={2025},
539
539
  eprint={2504.01017},
@@ -558,13 +558,13 @@ webssl_dino3b_light2b = ModelMeta(
558
558
  open_weights=True,
559
559
  public_training_code="",
560
560
  public_training_data=None,
561
- framework=["PyTorch"],
561
+ framework=["PyTorch", "Transformers", "safetensors"],
562
562
  reference="https://huggingface.co/facebook/webssl-dino3b-light2b-224",
563
563
  similarity_fn_name=None,
564
564
  use_instructions=False,
565
565
  training_datasets=webssl_dino_training_datasets,
566
566
  citation="""@article{fan2025scaling,
567
- title={Scaling Language-Free Visual Representation Learning},
567
+ title={Scaling Language-Free Visual Representation Learning},
568
568
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
569
569
  year={2025},
570
570
  eprint={2504.01017},
@@ -589,13 +589,13 @@ webssl_dino3b_heavy2b = ModelMeta(
589
589
  open_weights=True,
590
590
  public_training_code="",
591
591
  public_training_data=None,
592
- framework=["PyTorch"],
592
+ framework=["PyTorch", "Transformers", "safetensors"],
593
593
  reference="https://huggingface.co/facebook/webssl-dino3b-heavy2b-224",
594
594
  similarity_fn_name=None,
595
595
  use_instructions=False,
596
596
  training_datasets=webssl_dino_training_datasets,
597
597
  citation="""@article{fan2025scaling,
598
- title={Scaling Language-Free Visual Representation Learning},
598
+ title={Scaling Language-Free Visual Representation Learning},
599
599
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
600
600
  year={2025},
601
601
  eprint={2504.01017},
@@ -620,13 +620,13 @@ webssl_mae300m_full2b = ModelMeta(
620
620
  open_weights=True,
621
621
  public_training_code="",
622
622
  public_training_data=None,
623
- framework=["PyTorch"],
623
+ framework=["PyTorch", "Transformers", "safetensors"],
624
624
  reference="https://huggingface.co/facebook/webssl-mae300m-full2b-224",
625
625
  similarity_fn_name=None,
626
626
  use_instructions=False,
627
627
  training_datasets=webssl_dino_training_datasets,
628
628
  citation="""@article{fan2025scaling,
629
- title={Scaling Language-Free Visual Representation Learning},
629
+ title={Scaling Language-Free Visual Representation Learning},
630
630
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
631
631
  year={2025},
632
632
  eprint={2504.01017},
@@ -651,13 +651,13 @@ webssl_mae700m_full2b = ModelMeta(
651
651
  open_weights=True,
652
652
  public_training_code="",
653
653
  public_training_data=None,
654
- framework=["PyTorch"],
654
+ framework=["PyTorch", "Transformers", "safetensors"],
655
655
  reference="https://huggingface.co/facebook/webssl-mae700m-full2b-224",
656
656
  similarity_fn_name=None,
657
657
  use_instructions=False,
658
658
  training_datasets=webssl_dino_training_datasets,
659
659
  citation="""@article{fan2025scaling,
660
- title={Scaling Language-Free Visual Representation Learning},
660
+ title={Scaling Language-Free Visual Representation Learning},
661
661
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
662
662
  year={2025},
663
663
  eprint={2504.01017},
@@ -682,13 +682,13 @@ webssl_mae1b_full2b = ModelMeta(
682
682
  open_weights=True,
683
683
  public_training_code="",
684
684
  public_training_data=None,
685
- framework=["PyTorch"],
685
+ framework=["PyTorch", "Transformers", "safetensors"],
686
686
  reference="https://huggingface.co/facebook/webssl-mae1b-full2b-224",
687
687
  similarity_fn_name=None,
688
688
  use_instructions=False,
689
689
  training_datasets=webssl_dino_training_datasets,
690
690
  citation="""@article{fan2025scaling,
691
- title={Scaling Language-Free Visual Representation Learning},
691
+ title={Scaling Language-Free Visual Representation Learning},
692
692
  author={David Fan and Shengbang Tong and Jiachen Zhu and Koustuv Sinha and Zhuang Liu and Xinlei Chen and Michael Rabbat and Nicolas Ballas and Yann LeCun and Amir Bar and Saining Xie},
693
693
  year={2025},
694
694
  eprint={2504.01017},
@@ -45,7 +45,14 @@ e5_instruct = ModelMeta(
45
45
  open_weights=True,
46
46
  revision="baa7be480a7de1539afce709c8f13f833a510e0a",
47
47
  release_date=E5_PAPER_RELEASE_DATE,
48
- framework=["GritLM", "PyTorch", "Sentence Transformers"],
48
+ framework=[
49
+ "GritLM",
50
+ "PyTorch",
51
+ "Sentence Transformers",
52
+ "ONNX",
53
+ "safetensors",
54
+ "Transformers",
55
+ ],
49
56
  similarity_fn_name=ScoringFunction.COSINE,
50
57
  use_instructions=True,
51
58
  reference="https://huggingface.co/intfloat/multilingual-e5-large-instruct",
@@ -84,7 +91,13 @@ e5_mistral = ModelMeta(
84
91
  open_weights=True,
85
92
  revision="07163b72af1488142a360786df853f237b1a3ca1",
86
93
  release_date=E5_PAPER_RELEASE_DATE,
87
- framework=["GritLM", "PyTorch", "Sentence Transformers"],
94
+ framework=[
95
+ "GritLM",
96
+ "PyTorch",
97
+ "Sentence Transformers",
98
+ "safetensors",
99
+ "Transformers",
100
+ ],
88
101
  similarity_fn_name=ScoringFunction.COSINE,
89
102
  use_instructions=True,
90
103
  reference="https://huggingface.co/intfloat/e5-mistral-7b-instruct",
@@ -139,7 +152,13 @@ zeta_alpha_ai__zeta_alpha_e5_mistral = ModelMeta(
139
152
  open_weights=True,
140
153
  public_training_data=None,
141
154
  public_training_code=None,
142
- framework=["PyTorch", "Sentence Transformers", "GritLM"],
155
+ framework=[
156
+ "PyTorch",
157
+ "Sentence Transformers",
158
+ "GritLM",
159
+ "safetensors",
160
+ "Transformers",
161
+ ],
143
162
  reference="https://huggingface.co/zeta-alpha-ai/Zeta-Alpha-E5-Mistral",
144
163
  similarity_fn_name=ScoringFunction.COSINE,
145
164
  use_instructions=True,
@@ -216,7 +235,7 @@ BeastyZ__e5_R_mistral_7b = ModelMeta(
216
235
  open_weights=True,
217
236
  public_training_code="https://github.com/LeeSureman/E5-Retrieval-Reproduction",
218
237
  public_training_data="https://huggingface.co/datasets/BeastyZ/E5-R",
219
- framework=["PyTorch"],
238
+ framework=["PyTorch", "Transformers", "safetensors"],
220
239
  reference="https://huggingface.co/BeastyZ/e5-R-mistral-7b",
221
240
  similarity_fn_name="cosine",
222
241
  use_instructions=True,