mteb 2.6.4-py3-none-any.whl → 2.6.5-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- mteb/abstasks/classification.py +2 -3
- mteb/abstasks/multilabel_classification.py +3 -3
- mteb/abstasks/regression.py +1 -1
- mteb/abstasks/retrieval.py +1 -1
- mteb/abstasks/task_metadata.py +9 -14
- mteb/models/model_implementations/align_models.py +1 -1
- mteb/models/model_implementations/andersborges.py +2 -2
- mteb/models/model_implementations/ara_models.py +1 -1
- mteb/models/model_implementations/arctic_models.py +8 -8
- mteb/models/model_implementations/b1ade_models.py +1 -1
- mteb/models/model_implementations/bge_models.py +45 -21
- mteb/models/model_implementations/bica_model.py +3 -3
- mteb/models/model_implementations/blip2_models.py +2 -2
- mteb/models/model_implementations/blip_models.py +8 -8
- mteb/models/model_implementations/bmretriever_models.py +4 -4
- mteb/models/model_implementations/cadet_models.py +1 -1
- mteb/models/model_implementations/cde_models.py +2 -2
- mteb/models/model_implementations/clip_models.py +3 -3
- mteb/models/model_implementations/clips_models.py +3 -3
- mteb/models/model_implementations/codefuse_models.py +5 -5
- mteb/models/model_implementations/codesage_models.py +3 -3
- mteb/models/model_implementations/cohere_models.py +4 -4
- mteb/models/model_implementations/colpali_models.py +3 -3
- mteb/models/model_implementations/colqwen_models.py +8 -8
- mteb/models/model_implementations/colsmol_models.py +2 -2
- mteb/models/model_implementations/conan_models.py +1 -1
- mteb/models/model_implementations/dino_models.py +19 -19
- mteb/models/model_implementations/e5_instruct.py +23 -4
- mteb/models/model_implementations/e5_models.py +9 -9
- mteb/models/model_implementations/e5_v.py +1 -1
- mteb/models/model_implementations/eagerworks_models.py +1 -1
- mteb/models/model_implementations/emillykkejensen_models.py +3 -3
- mteb/models/model_implementations/en_code_retriever.py +1 -1
- mteb/models/model_implementations/euler_models.py +2 -2
- mteb/models/model_implementations/fa_models.py +9 -9
- mteb/models/model_implementations/facebookai.py +14 -2
- mteb/models/model_implementations/geogpt_models.py +1 -1
- mteb/models/model_implementations/gme_v_models.py +2 -2
- mteb/models/model_implementations/google_models.py +1 -1
- mteb/models/model_implementations/granite_vision_embedding_models.py +1 -1
- mteb/models/model_implementations/gritlm_models.py +2 -2
- mteb/models/model_implementations/gte_models.py +25 -13
- mteb/models/model_implementations/hinvec_models.py +1 -1
- mteb/models/model_implementations/ibm_granite_models.py +30 -6
- mteb/models/model_implementations/inf_models.py +2 -2
- mteb/models/model_implementations/jasper_models.py +2 -2
- mteb/models/model_implementations/jina_clip.py +1 -1
- mteb/models/model_implementations/jina_models.py +11 -5
- mteb/models/model_implementations/kblab.py +12 -6
- mteb/models/model_implementations/kennethenevoldsen_models.py +2 -2
- mteb/models/model_implementations/kfst.py +1 -1
- mteb/models/model_implementations/kowshik24_models.py +1 -1
- mteb/models/model_implementations/lgai_embedding_models.py +1 -1
- mteb/models/model_implementations/linq_models.py +1 -1
- mteb/models/model_implementations/listconranker.py +1 -1
- mteb/models/model_implementations/llm2clip_models.py +3 -3
- mteb/models/model_implementations/llm2vec_models.py +8 -8
- mteb/models/model_implementations/mdbr_models.py +14 -2
- mteb/models/model_implementations/misc_models.py +68 -68
- mteb/models/model_implementations/mme5_models.py +1 -1
- mteb/models/model_implementations/moco_models.py +2 -2
- mteb/models/model_implementations/mod_models.py +1 -1
- mteb/models/model_implementations/model2vec_models.py +13 -13
- mteb/models/model_implementations/moka_models.py +1 -1
- mteb/models/model_implementations/mxbai_models.py +16 -3
- mteb/models/model_implementations/nbailab.py +3 -3
- mteb/models/model_implementations/no_instruct_sentence_models.py +1 -1
- mteb/models/model_implementations/nomic_models.py +18 -6
- mteb/models/model_implementations/nomic_models_vision.py +1 -1
- mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py +2 -2
- mteb/models/model_implementations/nvidia_models.py +3 -3
- mteb/models/model_implementations/octen_models.py +2 -2
- mteb/models/model_implementations/openclip_models.py +6 -6
- mteb/models/model_implementations/opensearch_neural_sparse_models.py +5 -5
- mteb/models/model_implementations/ops_moa_models.py +1 -1
- mteb/models/model_implementations/ordalietech_solon_embeddings_mini_beta_1_1.py +1 -1
- mteb/models/model_implementations/pawan_models.py +1 -1
- mteb/models/model_implementations/piccolo_models.py +1 -1
- mteb/models/model_implementations/promptriever_models.py +4 -4
- mteb/models/model_implementations/pylate_models.py +5 -5
- mteb/models/model_implementations/qodo_models.py +2 -2
- mteb/models/model_implementations/qtack_models.py +1 -1
- mteb/models/model_implementations/qwen3_models.py +3 -3
- mteb/models/model_implementations/qzhou_models.py +2 -2
- mteb/models/model_implementations/rasgaard_models.py +1 -1
- mteb/models/model_implementations/reasonir_model.py +1 -1
- mteb/models/model_implementations/repllama_models.py +1 -1
- mteb/models/model_implementations/rerankers_custom.py +9 -3
- mteb/models/model_implementations/rerankers_monot5_based.py +14 -14
- mteb/models/model_implementations/richinfoai_models.py +1 -1
- mteb/models/model_implementations/ru_sentence_models.py +20 -20
- mteb/models/model_implementations/ruri_models.py +10 -10
- mteb/models/model_implementations/salesforce_models.py +3 -3
- mteb/models/model_implementations/samilpwc_models.py +1 -1
- mteb/models/model_implementations/sarashina_embedding_models.py +2 -2
- mteb/models/model_implementations/searchmap_models.py +1 -1
- mteb/models/model_implementations/sentence_transformers_models.py +58 -22
- mteb/models/model_implementations/shuu_model.py +1 -1
- mteb/models/model_implementations/siglip_models.py +10 -10
- mteb/models/model_implementations/slm_models.py +416 -0
- mteb/models/model_implementations/spartan8806_atles_champion.py +1 -1
- mteb/models/model_implementations/stella_models.py +17 -4
- mteb/models/model_implementations/tarka_models.py +2 -2
- mteb/models/model_implementations/text2vec_models.py +9 -3
- mteb/models/model_implementations/ua_sentence_models.py +1 -1
- mteb/models/model_implementations/uae_models.py +7 -1
- mteb/models/model_implementations/vdr_models.py +1 -1
- mteb/models/model_implementations/vi_vn_models.py +6 -6
- mteb/models/model_implementations/vlm2vec_models.py +2 -2
- mteb/models/model_implementations/youtu_models.py +1 -1
- mteb/models/model_implementations/yuan_models.py +1 -1
- mteb/models/model_implementations/yuan_models_en.py +1 -1
- mteb/models/model_meta.py +46 -17
- mteb/results/benchmark_results.py +2 -2
- mteb/tasks/classification/kur/kurdish_sentiment_classification.py +2 -2
- mteb/tasks/clustering/eng/hume_wiki_cities_clustering.py +1 -1
- mteb/tasks/clustering/eng/wiki_cities_clustering.py +1 -1
- mteb/tasks/clustering/zho/cmteb_clustering.py +2 -2
- mteb/tasks/reranking/multilingual/wikipedia_reranking_multilingual.py +1 -1
- mteb/tasks/retrieval/eng/cub200_i2i_retrieval.py +1 -1
- {mteb-2.6.4.dist-info → mteb-2.6.5.dist-info}/METADATA +3 -1
- {mteb-2.6.4.dist-info → mteb-2.6.5.dist-info}/RECORD +126 -125
- {mteb-2.6.4.dist-info → mteb-2.6.5.dist-info}/WHEEL +0 -0
- {mteb-2.6.4.dist-info → mteb-2.6.5.dist-info}/entry_points.txt +0 -0
- {mteb-2.6.4.dist-info → mteb-2.6.5.dist-info}/licenses/LICENSE +0 -0
- {mteb-2.6.4.dist-info → mteb-2.6.5.dist-info}/top_level.txt +0 -0
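The model-implementation changes shown below all expand the `framework` lists recorded on `ModelMeta` entries, adding tags such as "ONNX", "safetensors", and "Transformers". As a quick sanity check after upgrading, the sketch below reads that metadata back; it assumes `mteb.get_model_meta()` is exported by the installed 2.6.x release and that `ModelMeta` exposes the `framework` field shown in the hunks.

```python
# Minimal sketch: read back the framework tags recorded for one model after the upgrade.
# Assumes mteb.get_model_meta() is available in the installed 2.6.x release.
import mteb

meta = mteb.get_model_meta("sentence-transformers/all-MiniLM-L6-v2")
print(meta.framework)
# Expected to include the tags added in 2.6.5, e.g.
# ['Sentence Transformers', 'PyTorch', 'ONNX', 'safetensors', 'Transformers']
```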

mteb/models/model_implementations/sentence_transformers_models.py
@@ -125,7 +125,13 @@ all_minilm_l6_v2 = ModelMeta(
     max_tokens=256,
     reference="https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=[
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "ONNX",
+        "safetensors",
+        "Transformers",
+    ],
     use_instructions=False,
     superseded_by=None,
     adapted_from=None,
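The hunk above is representative; several other entries in this file receive the same expanded list. To see which registered models advertise a given tag after upgrading, a small sketch (assuming `mteb.get_model_metas()` returns all registered `ModelMeta` objects carrying the `name` and `framework` fields shown in this diff):

```python
# Sketch: count registered models whose metadata advertises ONNX support.
# Assumes mteb.get_model_metas() with no arguments returns every registered ModelMeta.
import mteb

onnx_models = [m.name for m in mteb.get_model_metas() if "ONNX" in (m.framework or [])]
print(f"{len(onnx_models)} models tagged with ONNX")
```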
@@ -150,7 +156,13 @@ all_minilm_l12_v2 = ModelMeta(
     max_tokens=256,
     reference="https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=[
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "ONNX",
+        "safetensors",
+        "Transformers",
+    ],
     use_instructions=False,
     superseded_by=None,
     adapted_from=None,
@@ -175,7 +187,13 @@ paraphrase_multilingual_minilm_l12_v2 = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=[
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "ONNX",
+        "safetensors",
+        "Transformers",
+    ],
     use_instructions=False,
     superseded_by=None,
     adapted_from=None,
@@ -200,7 +218,13 @@ paraphrase_multilingual_mpnet_base_v2 = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=[
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "ONNX",
+        "safetensors",
+        "Transformers",
+    ],
     use_instructions=False,
     superseded_by=None,
     adapted_from=None,
@@ -236,7 +260,7 @@ labse = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/LaBSE",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "ONNX", "safetensors"],
     use_instructions=False,
     superseded_by=None,
     adapted_from=None,
@@ -274,7 +298,13 @@ multi_qa_minilm_l6_cos_v1 = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/multi-qa-MiniLM-L6-cos-v1",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=[
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "ONNX",
+        "safetensors",
+        "Transformers",
+    ],
     use_instructions=False,
     superseded_by=None,
     adapted_from="nreimers/MiniLM-L6-H384-uncased",
@@ -299,7 +329,13 @@ all_mpnet_base_v2 = ModelMeta(
     max_tokens=384,
     reference="https://huggingface.co/sentence-transformers/all-mpnet-base-v2",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=[
+    framework=[
+        "Sentence Transformers",
+        "PyTorch",
+        "ONNX",
+        "safetensors",
+        "Transformers",
+    ],
     use_instructions=False,
     superseded_by=None,
     adapted_from=None,
@@ -403,7 +439,7 @@ static_similarity_mrl_multilingual_v1 = ModelMeta(
     max_tokens=None,
     reference="https://huggingface.co/sentence-transformers/static-similarity-mrl-multilingual-v1",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "ONNX", "safetensors"],
     use_instructions=False,
     superseded_by=None,
     adapted_from=None,
@@ -436,7 +472,7 @@ contriever = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/facebook/contriever-msmarco",
     similarity_fn_name=ScoringFunction.DOT_PRODUCT,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "Transformers"],
     use_instructions=False,
     citation="""
     @misc{izacard2021contriever,
@@ -466,7 +502,7 @@ microllama_text_embedding = ModelMeta(
     max_tokens=2048,
     reference="https://huggingface.co/keeeeenw/MicroLlama-text-embedding",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     superseded_by=None,
     adapted_from=None,
@@ -488,13 +524,13 @@ microllama_text_embedding = ModelMeta(
 
 SENTENCE_T5_CITATION = """
 @misc{ni2021sentencet5scalablesentenceencoders,
-  title={Sentence-T5: Scalable Sentence Encoders from Pre-trained Text-to-Text Models},
+  title={Sentence-T5: Scalable Sentence Encoders from Pre-trained Text-to-Text Models},
   author={Jianmo Ni and Gustavo Hernández Ábrego and Noah Constant and Ji Ma and Keith B. Hall and Daniel Cer and Yinfei Yang},
   year={2021},
   eprint={2108.08877},
   archivePrefix={arXiv},
   primaryClass={cs.CL},
-  url={https://arxiv.org/abs/2108.08877},
+  url={https://arxiv.org/abs/2108.08877},
 }
 """
 sentence_t5_base = ModelMeta(
@@ -512,7 +548,7 @@ sentence_t5_base = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/sentence-t5-base",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,
@@ -535,7 +571,7 @@ sentence_t5_large = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/sentence-t5-large",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "ONNX", "safetensors"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,
@@ -558,7 +594,7 @@ sentence_t5_xl = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/sentence-t5-xl",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,
@@ -581,7 +617,7 @@ sentence_t5_xxl = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/sentence-t5-xxl",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,
@@ -590,13 +626,13 @@ sentence_t5_xxl = ModelMeta(
 )
 GTR_CITATION = """
 @misc{ni2021largedualencodersgeneralizable,
-  title={Large Dual Encoders Are Generalizable Retrievers},
+  title={Large Dual Encoders Are Generalizable Retrievers},
   author={Jianmo Ni and Chen Qu and Jing Lu and Zhuyun Dai and Gustavo Hernández Ábrego and Ji Ma and Vincent Y. Zhao and Yi Luan and Keith B. Hall and Ming-Wei Chang and Yinfei Yang},
   year={2021},
   eprint={2112.07899},
   archivePrefix={arXiv},
   primaryClass={cs.IR},
-  url={https://arxiv.org/abs/2112.07899},
+  url={https://arxiv.org/abs/2112.07899},
 }
 """
 gtr_t5_large = ModelMeta(
@@ -614,7 +650,7 @@ gtr_t5_large = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/gtr-t5-large",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,
@@ -649,7 +685,7 @@ gtr_t5_xl = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/gtr-t5-xl",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,
@@ -683,7 +719,7 @@ gtr_t5_xxl = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/gtr-t5-xxl",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,
@@ -718,7 +754,7 @@ gtr_t5_base = ModelMeta(
     max_tokens=512,
     reference="https://huggingface.co/sentence-transformers/gtr-t5-base",
     similarity_fn_name=ScoringFunction.COSINE,
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,

mteb/models/model_implementations/shuu_model.py
@@ -16,7 +16,7 @@ codemodernbert_crow_meta = ModelMeta(
     max_tokens=1024,
     reference="https://huggingface.co/Shuu12121/CodeSearch-ModernBERT-Crow-Plus",
     similarity_fn_name="cosine",
-    framework=["Sentence Transformers", "PyTorch"],
+    framework=["Sentence Transformers", "PyTorch", "safetensors"],
     use_instructions=False,
     public_training_code=None,
     public_training_data=None,

mteb/models/model_implementations/siglip_models.py
@@ -138,7 +138,7 @@ siglip_so400m_patch14_224 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-so400m-patch14-224",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -162,7 +162,7 @@ siglip_so400m_patch14_384 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-so400m-patch14-384",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -186,7 +186,7 @@ siglip_so400m_patch16_256_i18n = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-so400m-patch16-256-i18n",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -210,7 +210,7 @@ siglip_base_patch16_256_multilingual = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-base-patch16-256-multilingual",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -234,7 +234,7 @@ siglip_base_patch16_256 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-base-patch16-256",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -258,7 +258,7 @@ siglip_base_patch16_512 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-base-patch16-512",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -282,7 +282,7 @@ siglip_base_patch16_384 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-base-patch16-384",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -306,7 +306,7 @@ siglip_base_patch16_224 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-base-patch16-224",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -330,7 +330,7 @@ siglip_large_patch16_256 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-large-patch16-256",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,
@@ -354,7 +354,7 @@ siglip_large_patch16_384 = ModelMeta(
     open_weights=True,
     public_training_code="https://github.com/google-research/big_vision/blob/main/big_vision/trainers/proj/image_text/siglip.py",
     public_training_data=None,
-    framework=["PyTorch"],
+    framework=["PyTorch", "Transformers", "safetensors"],
     reference="https://huggingface.co/google/siglip-large-patch16-384",
     similarity_fn_name=ScoringFunction.COSINE,
     use_instructions=False,