mteb-2.6.4-py3-none-any.whl → mteb-2.6.6-py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
Files changed (142)
  1. mteb/abstasks/classification.py +2 -3
  2. mteb/abstasks/multilabel_classification.py +3 -3
  3. mteb/abstasks/regression.py +1 -1
  4. mteb/abstasks/retrieval.py +1 -1
  5. mteb/abstasks/task_metadata.py +9 -14
  6. mteb/descriptive_stats/Retrieval/NanoClimateFEVER-VN.json +30 -0
  7. mteb/descriptive_stats/Retrieval/NanoDBPedia-VN.json +30 -0
  8. mteb/descriptive_stats/Retrieval/NanoFEVER-VN.json +30 -0
  9. mteb/descriptive_stats/Retrieval/NanoHotpotQA-VN.json +30 -0
  10. mteb/descriptive_stats/Retrieval/NanoMSMARCO-VN.json +30 -0
  11. mteb/descriptive_stats/Retrieval/NanoNQ-VN.json +30 -0
  12. mteb/descriptive_stats/Retrieval/TVPLRetrieval.json +30 -0
  13. mteb/models/model_implementations/align_models.py +1 -1
  14. mteb/models/model_implementations/andersborges.py +2 -2
  15. mteb/models/model_implementations/ara_models.py +1 -1
  16. mteb/models/model_implementations/arctic_models.py +8 -8
  17. mteb/models/model_implementations/b1ade_models.py +1 -1
  18. mteb/models/model_implementations/bge_models.py +45 -21
  19. mteb/models/model_implementations/bica_model.py +3 -3
  20. mteb/models/model_implementations/blip2_models.py +2 -2
  21. mteb/models/model_implementations/blip_models.py +8 -8
  22. mteb/models/model_implementations/bmretriever_models.py +4 -4
  23. mteb/models/model_implementations/cadet_models.py +1 -1
  24. mteb/models/model_implementations/cde_models.py +2 -2
  25. mteb/models/model_implementations/clip_models.py +3 -3
  26. mteb/models/model_implementations/clips_models.py +3 -3
  27. mteb/models/model_implementations/codefuse_models.py +5 -5
  28. mteb/models/model_implementations/codesage_models.py +3 -3
  29. mteb/models/model_implementations/cohere_models.py +4 -4
  30. mteb/models/model_implementations/colpali_models.py +3 -3
  31. mteb/models/model_implementations/colqwen_models.py +8 -8
  32. mteb/models/model_implementations/colsmol_models.py +2 -2
  33. mteb/models/model_implementations/conan_models.py +1 -1
  34. mteb/models/model_implementations/dino_models.py +19 -19
  35. mteb/models/model_implementations/e5_instruct.py +23 -4
  36. mteb/models/model_implementations/e5_models.py +9 -9
  37. mteb/models/model_implementations/e5_v.py +1 -1
  38. mteb/models/model_implementations/eagerworks_models.py +1 -1
  39. mteb/models/model_implementations/emillykkejensen_models.py +3 -3
  40. mteb/models/model_implementations/en_code_retriever.py +1 -1
  41. mteb/models/model_implementations/euler_models.py +2 -2
  42. mteb/models/model_implementations/fa_models.py +9 -9
  43. mteb/models/model_implementations/facebookai.py +14 -2
  44. mteb/models/model_implementations/geogpt_models.py +1 -1
  45. mteb/models/model_implementations/gme_v_models.py +2 -2
  46. mteb/models/model_implementations/google_models.py +1 -1
  47. mteb/models/model_implementations/granite_vision_embedding_models.py +1 -1
  48. mteb/models/model_implementations/gritlm_models.py +2 -2
  49. mteb/models/model_implementations/gte_models.py +25 -13
  50. mteb/models/model_implementations/hinvec_models.py +1 -1
  51. mteb/models/model_implementations/ibm_granite_models.py +30 -6
  52. mteb/models/model_implementations/inf_models.py +2 -2
  53. mteb/models/model_implementations/jasper_models.py +2 -2
  54. mteb/models/model_implementations/jina_clip.py +1 -1
  55. mteb/models/model_implementations/jina_models.py +11 -5
  56. mteb/models/model_implementations/kblab.py +12 -6
  57. mteb/models/model_implementations/kennethenevoldsen_models.py +2 -2
  58. mteb/models/model_implementations/kfst.py +1 -1
  59. mteb/models/model_implementations/kowshik24_models.py +1 -1
  60. mteb/models/model_implementations/lgai_embedding_models.py +1 -1
  61. mteb/models/model_implementations/linq_models.py +1 -1
  62. mteb/models/model_implementations/listconranker.py +1 -1
  63. mteb/models/model_implementations/llm2clip_models.py +3 -3
  64. mteb/models/model_implementations/llm2vec_models.py +8 -8
  65. mteb/models/model_implementations/mdbr_models.py +14 -2
  66. mteb/models/model_implementations/misc_models.py +68 -68
  67. mteb/models/model_implementations/mme5_models.py +1 -1
  68. mteb/models/model_implementations/moco_models.py +2 -2
  69. mteb/models/model_implementations/mod_models.py +1 -1
  70. mteb/models/model_implementations/model2vec_models.py +13 -13
  71. mteb/models/model_implementations/moka_models.py +1 -1
  72. mteb/models/model_implementations/mxbai_models.py +16 -3
  73. mteb/models/model_implementations/nbailab.py +3 -3
  74. mteb/models/model_implementations/no_instruct_sentence_models.py +1 -1
  75. mteb/models/model_implementations/nomic_models.py +18 -6
  76. mteb/models/model_implementations/nomic_models_vision.py +1 -1
  77. mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py +2 -2
  78. mteb/models/model_implementations/nvidia_models.py +3 -3
  79. mteb/models/model_implementations/octen_models.py +3 -3
  80. mteb/models/model_implementations/openclip_models.py +6 -6
  81. mteb/models/model_implementations/opensearch_neural_sparse_models.py +5 -5
  82. mteb/models/model_implementations/ops_moa_models.py +1 -1
  83. mteb/models/model_implementations/ordalietech_solon_embeddings_mini_beta_1_1.py +1 -1
  84. mteb/models/model_implementations/pawan_models.py +1 -1
  85. mteb/models/model_implementations/piccolo_models.py +1 -1
  86. mteb/models/model_implementations/promptriever_models.py +4 -4
  87. mteb/models/model_implementations/pylate_models.py +5 -5
  88. mteb/models/model_implementations/qodo_models.py +2 -2
  89. mteb/models/model_implementations/qtack_models.py +1 -1
  90. mteb/models/model_implementations/qwen3_models.py +3 -3
  91. mteb/models/model_implementations/qzhou_models.py +2 -2
  92. mteb/models/model_implementations/rasgaard_models.py +1 -1
  93. mteb/models/model_implementations/reasonir_model.py +1 -1
  94. mteb/models/model_implementations/repllama_models.py +1 -1
  95. mteb/models/model_implementations/rerankers_custom.py +9 -3
  96. mteb/models/model_implementations/rerankers_monot5_based.py +14 -14
  97. mteb/models/model_implementations/richinfoai_models.py +1 -1
  98. mteb/models/model_implementations/ru_sentence_models.py +20 -20
  99. mteb/models/model_implementations/ruri_models.py +10 -10
  100. mteb/models/model_implementations/salesforce_models.py +3 -3
  101. mteb/models/model_implementations/samilpwc_models.py +1 -1
  102. mteb/models/model_implementations/sarashina_embedding_models.py +2 -2
  103. mteb/models/model_implementations/searchmap_models.py +1 -1
  104. mteb/models/model_implementations/sentence_transformers_models.py +58 -22
  105. mteb/models/model_implementations/shuu_model.py +1 -1
  106. mteb/models/model_implementations/siglip_models.py +10 -10
  107. mteb/models/model_implementations/slm_models.py +416 -0
  108. mteb/models/model_implementations/spartan8806_atles_champion.py +1 -1
  109. mteb/models/model_implementations/stella_models.py +17 -4
  110. mteb/models/model_implementations/tarka_models.py +2 -2
  111. mteb/models/model_implementations/text2vec_models.py +9 -3
  112. mteb/models/model_implementations/ua_sentence_models.py +1 -1
  113. mteb/models/model_implementations/uae_models.py +7 -1
  114. mteb/models/model_implementations/vdr_models.py +1 -1
  115. mteb/models/model_implementations/vi_vn_models.py +6 -6
  116. mteb/models/model_implementations/vlm2vec_models.py +2 -2
  117. mteb/models/model_implementations/youtu_models.py +1 -1
  118. mteb/models/model_implementations/yuan_models.py +1 -1
  119. mteb/models/model_implementations/yuan_models_en.py +1 -1
  120. mteb/models/model_meta.py +46 -17
  121. mteb/results/benchmark_results.py +2 -2
  122. mteb/tasks/classification/kur/kurdish_sentiment_classification.py +2 -2
  123. mteb/tasks/clustering/eng/hume_wiki_cities_clustering.py +1 -1
  124. mteb/tasks/clustering/eng/wiki_cities_clustering.py +1 -1
  125. mteb/tasks/clustering/zho/cmteb_clustering.py +2 -2
  126. mteb/tasks/reranking/multilingual/wikipedia_reranking_multilingual.py +1 -1
  127. mteb/tasks/retrieval/eng/cub200_i2i_retrieval.py +1 -1
  128. mteb/tasks/retrieval/vie/__init__.py +14 -6
  129. mteb/tasks/retrieval/vie/climate_fevervn_retrieval.py +39 -0
  130. mteb/tasks/retrieval/vie/db_pedia_vn_retrieval.py +39 -0
  131. mteb/tasks/retrieval/vie/fevervn_retrieval.py +39 -0
  132. mteb/tasks/retrieval/vie/hotpot_qavn_retrieval.py +39 -0
  133. mteb/tasks/retrieval/vie/msmarcovn_retrieval.py +48 -0
  134. mteb/tasks/retrieval/vie/nqvn_retrieval.py +39 -0
  135. mteb/tasks/retrieval/vie/tvpl_retrieval.py +42 -0
  136. mteb/tasks/retrieval/vie/zac_legal_text_retrieval.py +15 -1
  137. {mteb-2.6.4.dist-info → mteb-2.6.6.dist-info}/METADATA +3 -3
  138. {mteb-2.6.4.dist-info → mteb-2.6.6.dist-info}/RECORD +142 -133
  139. {mteb-2.6.4.dist-info → mteb-2.6.6.dist-info}/WHEEL +0 -0
  140. {mteb-2.6.4.dist-info → mteb-2.6.6.dist-info}/entry_points.txt +0 -0
  141. {mteb-2.6.4.dist-info → mteb-2.6.6.dist-info}/licenses/LICENSE +0 -0
  142. {mteb-2.6.4.dist-info → mteb-2.6.6.dist-info}/top_level.txt +0 -0
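Most of the model_implementations changes below widen the framework list on each ModelMeta entry (adding backends such as ONNX, safetensors, and Transformers) and normalize whitespace inside the BibTeX citation strings. A minimal sketch of how the updated metadata surfaces, assuming mteb's public get_model_meta helper; the expected list is taken from the bge-small-en-v1.5 hunk below:

import mteb

# Look up the registered metadata for one of the models patched in this diff.
meta = mteb.get_model_meta("BAAI/bge-small-en-v1.5")

# As of 2.6.6 the framework list should include the newly declared backends:
# ["Sentence Transformers", "PyTorch", "ONNX", "safetensors", "Transformers"]
print(meta.framework)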
mteb/models/model_implementations/bge_models.py

@@ -331,7 +331,13 @@ bge_small_en_v1_5 = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-small-en-v1.5",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "ONNX",
+        "safetensors",
+        "Transformers",
+    ],
     use_instructions=True,
     public_training_code=None,
     public_training_data="https://data.baai.ac.cn/details/BAAI-MTP",
@@ -357,7 +363,13 @@ bge_base_en_v1_5 = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-base-en-v1.5",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "ONNX",
+        "safetensors",
+        "Transformers",
+    ],
     use_instructions=True,
     public_training_code=None,  # seemingly released (at least for some models, but the link is broken
     public_training_data="https://data.baai.ac.cn/details/BAAI-MTP",
@@ -383,7 +395,13 @@ bge_large_en_v1_5 = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-large-en-v1.5",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "ONNX",
+        "safetensors",
+        "Transformers",
+    ],
     use_instructions=True,
     citation=BGE_15_CITATION,
     public_training_code=None,  # seemingly released (at least for some models, but the link is broken
@@ -409,7 +427,7 @@ bge_small_zh = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-small-zh",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,
@@ -436,7 +454,7 @@ bge_base_zh = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-base-zh",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers", "safetensors"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,
@@ -463,7 +481,7 @@ bge_large_zh = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-large-zh",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers", "safetensors"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,
@@ -490,7 +508,7 @@ bge_small_en = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-small-en",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers", "safetensors"],
     use_instructions=True,
     public_training_code=None,
     public_training_data="https://data.baai.ac.cn/details/BAAI-MTP",
@@ -517,7 +535,13 @@ bge_base_en = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-base-en",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "Transformers",
+        "ONNX",
+        "safetensors",
+    ],
     use_instructions=True,
     public_training_code=None,  # seemingly released (at least for some models, but the link is broken
     public_training_data="https://data.baai.ac.cn/details/BAAI-MTP",
@@ -544,7 +568,7 @@ bge_large_en = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-large-en",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers", "safetensors"],
     use_instructions=True,
     public_training_code=None,  # seemingly released (at least for some models, but the link is broken
     public_training_data="https://data.baai.ac.cn/details/BAAI-MTP",
@@ -572,7 +596,7 @@ bge_small_zh_v1_5 = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-small-zh-v1.5",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers", "safetensors"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,
@@ -598,7 +622,7 @@ bge_base_zh_v1_5 = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-base-zh-v1.5",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,
@@ -624,7 +648,7 @@ bge_large_zh_v1_5 = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/BAAI/bge-large-zh-v1.5",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,
@@ -647,13 +671,13 @@ bge_m3 = ModelMeta(
     max_tokens=8194,
     reference="https://huggingface.co/BAAI/bge-m3",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "ONNX"],
     use_instructions=False,
     public_training_code=None,
     public_training_data="https://huggingface.co/datasets/cfli/bge-full-data",
     training_datasets=bge_m3_training_data,
     citation="""@misc{bge-m3,
-    title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
+    title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
     author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},
     year={2024},
     eprint={2402.03216},
@@ -743,7 +767,7 @@ bge_multilingual_gemma2 = ModelMeta(
     max_tokens=8192,  # from old C-MTEB leaderboard
     reference="https://huggingface.co/BAAI/bge-multilingual-gemma2",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors", "Transformers"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,
@@ -754,7 +778,7 @@ bge_multilingual_gemma2 = ModelMeta(
     | bge_full_data
     | bge_m3_training_data,
     citation="""@misc{bge-m3,
-    title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
+    title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
     author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},
     year={2024},
     eprint={2402.03216},
@@ -764,7 +788,7 @@ bge_multilingual_gemma2 = ModelMeta(


 @misc{bge_embedding,
-    title={C-Pack: Packaged Resources To Advance General Chinese Embedding},
+    title={C-Pack: Packaged Resources To Advance General Chinese Embedding},
     author={Shitao Xiao and Zheng Liu and Peitian Zhang and Niklas Muennighoff},
     year={2023},
     eprint={2309.07597},
@@ -790,7 +814,7 @@ bge_en_icl = ModelMeta(
     max_tokens=32768,
     reference="https://huggingface.co/BAAI/bge-en-icl",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors", "Transformers"],
     use_instructions=False,
     public_training_code="https://github.com/FlagOpen/FlagEmbedding",
     public_training_data="https://huggingface.co/datasets/cfli/bge-full-data",
@@ -824,13 +848,13 @@ bge_m3_unsupervised = ModelMeta(
     max_tokens=8192,
     reference="https://huggingface.co/BAAI/bge-m3-unsupervised",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code="https://github.com/FlagOpen/FlagEmbedding",
     public_training_data="https://huggingface.co/datasets/cfli/bge-full-data",
     training_datasets=bge_m3_training_data,
     citation="""@misc{bge-m3,
-    title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
+    title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
     author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},
     year={2024},
     eprint={2402.03216},
@@ -854,7 +878,7 @@ manu__bge_m3_custom_fr = ModelMeta(
     open_weights=True,
     public_training_code=None,
     public_training_data=None,
-    framework=["PyTorch", "Sentence Transformers"],
+    framework=["PyTorch", "Sentence Transformers", "safetensors"],
     reference="https://huggingface.co/manu/bge-m3-custom-fr",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=None,

mteb/models/model_implementations/bica_model.py

@@ -15,20 +15,20 @@ bica_base = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/bisectgroup/BiCA-base",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code="https://github.com/NiravBhattLab/BiCA",
     public_training_data="https://huggingface.co/datasets/bisectgroup/hard-negatives-traversal",
     adapted_from="thenlper/gte-base",
     citation="""
     @misc{sinha2025bicaeffectivebiomedicaldense,
-    title={BiCA: Effective Biomedical Dense Retrieval with Citation-Aware Hard Negatives},
+    title={BiCA: Effective Biomedical Dense Retrieval with Citation-Aware Hard Negatives},
     author={Aarush Sinha and Pavan Kumar S and Roshan Balaji and Nirav Pravinbhai Bhatt},
     year={2025},
     eprint={2511.08029},
     archivePrefix={arXiv},
     primaryClass={cs.IR},
-    url={https://arxiv.org/abs/2511.08029},
+    url={https://arxiv.org/abs/2511.08029},
     }
     """,
     training_datasets=set(),

mteb/models/model_implementations/blip2_models.py

@@ -179,7 +179,7 @@ blip2_opt_2_7b = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/LAVIS/tree/main/projects/blip2",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/Salesforce/blip2-opt-2.7b",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -203,7 +203,7 @@ blip2_opt_6_7b_coco = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/LAVIS/tree/main/projects/blip2",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/Salesforce/blip2-opt-6.7b-coco",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,

mteb/models/model_implementations/blip_models.py

@@ -143,7 +143,7 @@ blip_image_captioning_large = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/BLIP",
     public_training_data="https://github.com/salesforce/BLIP",
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/Salesforce/blip-image-captioning-large",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -171,7 +171,7 @@ blip_image_captioning_base = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/BLIP",
     public_training_data="https://github.com/salesforce/BLIP",
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/Salesforce/blip-image-captioning-base",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -200,7 +200,7 @@ blip_vqa_base = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/BLIP",
     public_training_data="https://github.com/salesforce/BLIP",
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/Salesforce/blip-vqa-base",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -227,7 +227,7 @@ blip_vqa_capfilt_large = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/BLIP",
     public_training_data="https://github.com/salesforce/BLIP",
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/Salesforce/blip-vqa-capfilt-large",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -254,7 +254,7 @@ blip_itm_base_coco = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/BLIP",
     public_training_data="https://github.com/salesforce/BLIP",
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/Salesforce/blip-itm-base-coco",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -281,7 +281,7 @@ blip_itm_large_coco = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/BLIP",
     public_training_data="https://github.com/salesforce/BLIP",
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/Salesforce/blip-itm-large-coco",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -309,7 +309,7 @@ blip_itm_base_flickr = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/BLIP",
     public_training_data="https://github.com/salesforce/BLIP",
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/Salesforce/blip-itm-base-flickr",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -337,7 +337,7 @@ blip_itm_large_flickr = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/salesforce/BLIP",
     public_training_data="https://github.com/salesforce/BLIP",
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/Salesforce/blip-itm-large-flickr",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,

mteb/models/model_implementations/bmretriever_models.py

@@ -104,7 +104,7 @@ BMRetriever_410M = ModelMeta(
     license="mit",
     reference="https://huggingface.co/BMRetriever/BMRetriever-410M",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers", "safetensors"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,
@@ -134,7 +134,7 @@ BMRetriever_1B = ModelMeta(
     license="mit",
     reference="https://huggingface.co/BMRetriever/BMRetriever-1B",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers", "safetensors"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,
@@ -164,7 +164,7 @@ BMRetriever_2B = ModelMeta(
     license="mit",
     reference="https://huggingface.co/BMRetriever/BMRetriever-2B",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers", "safetensors"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,
@@ -194,7 +194,7 @@ BMRetriever_7B = ModelMeta(
     license="mit",
     reference="https://huggingface.co/BMRetriever/BMRetriever-7B",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers", "safetensors"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,

mteb/models/model_implementations/cadet_models.py

@@ -47,7 +47,7 @@ cadet_embed = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/manveertamber/cadet-embed-base-v1",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=True,
     public_training_code="https://github.com/manveertamber/cadet-dense-retrieval",
     # we provide the code to generate the training data

mteb/models/model_implementations/cde_models.py

@@ -227,7 +227,7 @@ cde_small_v1 = ModelMeta(
     embed_dim=768,
     license="mit",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers"],
+    framework=["Sentence Transformers", "safetensors", "Transformers"],
     reference="https://huggingface.co/jxm/cde-small-v1",
     use_instructions=True,
     adapted_from="nomic-ai/nomic-bert-2048",
@@ -256,7 +256,7 @@ cde_small_v2 = ModelMeta(
     embed_dim=768,
     license="mit",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers"],
+    framework=["Sentence Transformers", "safetensors", "Transformers"],
     reference="https://huggingface.co/jxm/cde-small-v1",
     use_instructions=True,
     adapted_from="answerdotai/ModernBERT-base",

mteb/models/model_implementations/clip_models.py

@@ -130,7 +130,7 @@ clip_vit_large_patch14 = ModelMeta(
     open_weights=True,
     public_training_code=None,
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/openai/clip-vit-large-patch14",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -154,7 +154,7 @@ clip_vit_base_patch32 = ModelMeta(
     open_weights=True,
     public_training_code=None,
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/openai/clip-vit-base-patch32",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -178,7 +178,7 @@ clip_vit_base_patch16 = ModelMeta(
     open_weights=True,
     public_training_code=None,
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/openai/clip-vit-base-patch16",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,

mteb/models/model_implementations/clips_models.py

@@ -36,7 +36,7 @@ e5_nl_small = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/clips/e5-small-trm-nl",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors", "Transformers"],
     use_instructions=True,
     public_training_code="https://github.com/ELotfi/e5-nl",
     public_training_data="https://huggingface.co/collections/clips/beir-nl",
@@ -63,7 +63,7 @@ e5_nl_base = ModelMeta(
     max_tokens=514,
     reference="https://huggingface.co/clips/e5-base-trm-nl",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors", "Transformers"],
     use_instructions=True,
     public_training_code="https://github.com/ELotfi/e5-nl",
     public_training_data="https://huggingface.co/collections/clips/beir-nl",
@@ -90,7 +90,7 @@ e5_nl_large = ModelMeta(
     max_tokens=514,
     reference="https://huggingface.co/clips/e5-large-trm-nl",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors", "Transformers"],
     use_instructions=True,
     public_training_code="https://github.com/ELotfi/e5-nl",
     public_training_data="https://huggingface.co/collections/clips/beir-nl",

mteb/models/model_implementations/codefuse_models.py

@@ -242,7 +242,7 @@ F2LLM_0B6 = ModelMeta(
     max_tokens=8192,
     reference="https://huggingface.co/codefuse-ai/F2LLM-0.6B",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors", "Transformers"],
     use_instructions=True,
     public_training_code="https://github.com/codefuse-ai/F2LLM",
     public_training_data="https://huggingface.co/datasets/codefuse-ai/F2LLM",
@@ -272,7 +272,7 @@ F2LLM_1B7 = ModelMeta(
     max_tokens=8192,
     reference="https://huggingface.co/codefuse-ai/F2LLM-1.7B",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors", "Transformers"],
     use_instructions=True,
     public_training_code="https://github.com/codefuse-ai/F2LLM",
     public_training_data="https://huggingface.co/datasets/codefuse-ai/F2LLM",
@@ -302,7 +302,7 @@ F2LLM_4B = ModelMeta(
     max_tokens=8192,
     reference="https://huggingface.co/codefuse-ai/F2LLM-4B",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors", "Transformers"],
     use_instructions=True,
     public_training_code="https://github.com/codefuse-ai/F2LLM",
     public_training_data="https://huggingface.co/datasets/codefuse-ai/F2LLM",
@@ -325,7 +325,7 @@ C2LLM_0B5 = ModelMeta(
     open_weights=True,
     public_training_code=None,
     public_training_data=None,
-    framework=["PyTorch", "Sentence Transformers"],
+    framework=["PyTorch", "Sentence Transformers", "Transformers", "safetensors"],
     reference="https://huggingface.co/codefuse-ai/C2LLM-0.5B",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=True,
@@ -353,7 +353,7 @@ C2LLM_7B = ModelMeta(
     open_weights=True,
     public_training_code=None,
     public_training_data=None,
-    framework=["PyTorch", "Sentence Transformers"],
+    framework=["PyTorch", "Sentence Transformers", "Transformers", "safetensors"],
     reference="https://huggingface.co/codefuse-ai/C2LLM-7B",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=True,

mteb/models/model_implementations/codesage_models.py

@@ -35,7 +35,7 @@ codesage_large = ModelMeta(
     open_weights=True,
     public_training_code=None,
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/codesage/codesage-large-v2",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -62,7 +62,7 @@ codesage_base = ModelMeta(
     open_weights=True,
     public_training_code=None,
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/codesage/codesage-base-v2",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -89,7 +89,7 @@ codesage_small = ModelMeta(
     open_weights=True,
     public_training_code=None,
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers"],
     reference="https://huggingface.co/codesage/codesage-small-v2",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,

mteb/models/model_implementations/cohere_models.py

@@ -392,7 +392,7 @@ cohere_mult_3 = ModelMeta(
     reference="https://cohere.com/blog/introducing-embed-v3",
     license=None,
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["API"],
+    framework=["API", "Transformers"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,  # assumed
@@ -417,7 +417,7 @@ cohere_eng_3 = ModelMeta(
     embed_dim=1024,
     license=None,
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["API"],
+    framework=["API", "Transformers"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,  # assumed
@@ -442,7 +442,7 @@ cohere_mult_light_3 = ModelMeta(
     embed_dim=384,
     license=None,
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["API"],
+    framework=["API", "Transformers"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,  # assumed
@@ -467,7 +467,7 @@ cohere_eng_light_3 = ModelMeta(
     embed_dim=384,
     license=None,
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["API"],
+    framework=["API", "Transformers"],
     use_instructions=True,
     public_training_code=None,
     public_training_data=None,  # assumed

mteb/models/model_implementations/colpali_models.py

@@ -226,7 +226,7 @@ colpali_v1_1 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/illuin-tech/colpali",
     public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
-    framework=["ColPali"],
+    framework=["ColPali", "safetensors"],
     reference="https://huggingface.co/vidore/colpali-v1.1",
     similarity_fn_name=ScoringFunction.MAX_SIM,
     use_instructions=True,
@@ -253,7 +253,7 @@ colpali_v1_2 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/illuin-tech/colpali",
     public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
-    framework=["ColPali"],
+    framework=["ColPali", "safetensors"],
     reference="https://huggingface.co/vidore/colpali-v1.2",
     similarity_fn_name=ScoringFunction.MAX_SIM,
     use_instructions=True,
@@ -280,7 +280,7 @@ colpali_v1_3 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/illuin-tech/colpali",
     public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
-    framework=["ColPali"],
+    framework=["ColPali", "safetensors"],
     reference="https://huggingface.co/vidore/colpali-v1.3",
     similarity_fn_name=ScoringFunction.MAX_SIM,
     use_instructions=True,

mteb/models/model_implementations/colqwen_models.py

@@ -226,7 +226,7 @@ colqwen2 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/illuin-tech/colpali",
     public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
-    framework=["ColPali"],
+    framework=["ColPali", "safetensors"],
     reference="https://huggingface.co/vidore/colqwen2-v1.0",
     similarity_fn_name="MaxSim",
     use_instructions=True,
@@ -253,7 +253,7 @@ colqwen2_5 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/illuin-tech/colpali",
     public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
-    framework=["ColPali"],
+    framework=["ColPali", "safetensors"],
     reference="https://huggingface.co/vidore/colqwen2.5-v0.2",
     similarity_fn_name="MaxSim",
     use_instructions=True,
@@ -297,7 +297,7 @@ colqwen3_8b = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/illuin-tech/colpali",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/TomoroAI/tomoro-colqwen3-embed-8b",
     similarity_fn_name=ScoringFunction.MAX_SIM,
     use_instructions=True,
@@ -321,7 +321,7 @@ colqwen3_4b = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/illuin-tech/colpali",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/TomoroAI/tomoro-colqwen3-embed-4b",
     similarity_fn_name=ScoringFunction.MAX_SIM,
     use_instructions=True,
@@ -348,7 +348,7 @@ colnomic_7b = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/nomic-ai/colpali",
     public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
-    framework=["ColPali"],
+    framework=["ColPali", "safetensors"],
     reference="https://huggingface.co/nomic-ai/colnomic-embed-multimodal-7b",
     similarity_fn_name="MaxSim",
     use_instructions=True,
@@ -393,7 +393,7 @@ colnomic_3b = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/nomic-ai/colpali",
     public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
-    framework=["ColPali"],
+    framework=["ColPali", "safetensors"],
     reference="https://huggingface.co/nomic-ai/colnomic-embed-multimodal-3b",
     similarity_fn_name="MaxSim",
     use_instructions=True,
@@ -458,7 +458,7 @@ evoqwen25_vl_retriever_3b_v1 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/illuin-tech/colpali",
     public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
-    framework=["ColPali"],
+    framework=["ColPali", "safetensors"],
     reference="https://huggingface.co/ApsaraStackMaaS/EvoQwen2.5-VL-Retriever-3B-v1",
     similarity_fn_name="MaxSim",
     use_instructions=True,
@@ -484,7 +484,7 @@ evoqwen25_vl_retriever_7b_v1 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/illuin-tech/colpali",
     public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
-    framework=["ColPali"],
+    framework=["ColPali", "safetensors"],
     reference="https://huggingface.co/ApsaraStackMaaS/EvoQwen2.5-VL-Retriever-7B-v1",
     similarity_fn_name="MaxSim",
     use_instructions=True,
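Beyond the metadata updates above, this release also registers the new Vietnamese retrieval tasks listed in the file table (the Nano*-VN sets and TVPLRetrieval under mteb/tasks/retrieval/vie/). A hedged sketch of running one of them, assuming the v2-style mteb.evaluate entry point; the model choice is an arbitrary example:

import mteb

# TVPLRetrieval is one of the Vietnamese retrieval tasks added in 2.6.6.
tasks = mteb.get_tasks(tasks=["TVPLRetrieval"])

# Any registered embedding model can be evaluated; bge-m3 is just an example.
model = mteb.get_model("BAAI/bge-m3")

# Run the retrieval evaluation and collect per-task results.
results = mteb.evaluate(model, tasks=tasks)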