mteb 2.1.4__py3-none-any.whl → 2.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/mteb/models/model_implementations/colqwen_models.py
+++ b/mteb/models/model_implementations/colqwen_models.py
@@ -220,3 +220,60 @@ colnomic_7b = ModelMeta(
     training_datasets=COLNOMIC_TRAINING_DATA,
     citation=COLNOMIC_CITATION,
 )
+
+
+EVOQWEN_TRAINING_DATA = {
+    "colpali_train_set",
+    "VisRAG-Ret-Train-Synthetic-data",
+    "VisRAG-Ret-Train-In-domain-data",
+}
+
+evoqwen25_vl_retriever_3b_v1 = ModelMeta(
+    loader=ColQwen2_5Wrapper,
+    loader_kwargs=dict(
+        torch_dtype=torch.float16, attn_implementation="flash_attention_2"
+    ),
+    name="ApsaraStackMaaS/EvoQwen2.5-VL-Retriever-3B-v1",
+    languages=["eng-Latn"],
+    revision="aeacaa2775f2758d82721eb1cf2f5daf1a392da9",
+    release_date="2025-11-04",
+    modalities=["image", "text"],
+    n_parameters=3_000_000_000,
+    memory_usage_mb=7200,
+    max_tokens=128000,
+    embed_dim=128,
+    license="apache-2.0",
+    open_weights=True,
+    public_training_code="https://github.com/illuin-tech/colpali",
+    public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
+    framework=["ColPali"],
+    reference="https://huggingface.co/ApsaraStackMaaS/EvoQwen2.5-VL-Retriever-3B-v1",
+    similarity_fn_name="MaxSim",
+    use_instructions=True,
+    training_datasets=EVOQWEN_TRAINING_DATA,
+)
+
+evoqwen25_vl_retriever_7b_v1 = ModelMeta(
+    loader=ColQwen2_5Wrapper,
+    loader_kwargs=dict(
+        torch_dtype=torch.float16, attn_implementation="flash_attention_2"
+    ),
+    name="ApsaraStackMaaS/EvoQwen2.5-VL-Retriever-7B-v1",
+    languages=["eng-Latn"],
+    revision="8952ac6ee0e7de2e9211b165921518caf9202110",
+    release_date="2025-11-04",
+    modalities=["image", "text"],
+    n_parameters=7_000_000_000,
+    memory_usage_mb=14400,
+    max_tokens=128000,
+    embed_dim=128,
+    license="apache-2.0",
+    open_weights=True,
+    public_training_code="https://github.com/illuin-tech/colpali",
+    public_training_data="https://huggingface.co/datasets/vidore/colpali_train_set",
+    framework=["ColPali"],
+    reference="https://huggingface.co/ApsaraStackMaaS/EvoQwen2.5-VL-Retriever-7B-v1",
+    similarity_fn_name="MaxSim",
+    use_instructions=True,
+    training_datasets=EVOQWEN_TRAINING_DATA,
+)
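Both new EvoQwen entries declare `similarity_fn_name="MaxSim"`, the ColPali-style late-interaction score over multi-vector embeddings. Below is a minimal sketch of that scoring rule, illustrative shapes only; the wrapper's actual implementation lives in the package, not here.

```python
import torch

def maxsim_score(query_emb: torch.Tensor, doc_emb: torch.Tensor) -> torch.Tensor:
    """MaxSim: for each query token vector, take its best-matching document
    token vector, then sum those maxima over all query tokens."""
    sim = query_emb @ doc_emb.T           # (n_query_tokens, n_doc_tokens)
    return sim.max(dim=1).values.sum()    # scalar relevance score

# Illustrative shapes: 128 matches the embed_dim declared above.
q = torch.randn(12, 128)    # 12 query token embeddings
d = torch.randn(700, 128)   # 700 document patch/token embeddings
print(maxsim_score(q, d))
```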
--- a/mteb/models/model_implementations/kalm_models.py
+++ b/mteb/models/model_implementations/kalm_models.py
@@ -247,6 +247,40 @@ kalm_v2_training_data = {
 }
 
 
+KaLM_Embedding_gemma_3_12b_training_data = [
+    "BornholmBitextMining",
+    "BibleNLPBitextMining",  # train[256:] to avoid the test set in MTEB
+    "FinParaSTS",
+    "SemRel24STS",
+    "STSB",
+    "GermanSTSBenchmark",
+    "TERRa",
+    "StatcanDialogueDatasetRetrieval",
+    "SwissJudgementClassification",
+    "GreekLegalCodeClassification",
+    "CataloniaTweetClassification",
+    "NordicLangClassification",
+    "CyrillicTurkicLangClassification",
+    "PoemSentimentClassification",
+    "CzechProductReviewSentimentClassification",
+    "DBpediaClassification",
+    "IndicLangClassification",
+    "NusaParagraphEmotionClassification",
+    "OdiaNewsClassification",
+    "ScalaClassification",
+    "ItaCaseholdClassification",
+    "CSFDSKMovieReviewSentimentClassification",
+    "ToxicConversationsClassification",
+    "PAC",
+    "SpartQA",
+    "WinoGrande",
+    "Quail",
+    "HellaSwag",
+    "AlphaNLI",
+]
+KaLM_Embedding_gemma_3_12b_training_data += kalm_training_data
+
+
 KaLM_task_prompts = {
     "AmazonCounterfactualClassification": "Given an Amazon review, judge whether it is counterfactual.",
     "AmazonPolarityClassification": "Classifying Amazon reviews into positive or negative sentiment",
@@ -450,13 +484,7 @@ KaLM_v2_task_prompts = {
     "SummEval-document": "Retrieve semantically similar summaries",
 }
 
-KaLM_X_task_prompts = {
-    "Classification": "classify the query into different classes.",
-    "MultilabelClassification": "Instruct: classify the query into different classes.",
-    "Clustering": "classify the query into different classes.",
-    "Reranking-query": "Given a query, retrieve documents that answer the query.",
-    "Retrieval-query": "Given a query, retrieve documents that answer the query.",
-    "InstructionRetrieval-query": "Given a query, retrieve documents that answer the query.",
+KaLM_Embedding_gemma_3_12b_task_prompts = {
     "AmazonCounterfactualClassification": "Classify a given Amazon customer review text as either counterfactual or not-counterfactual",
     "AmazonPolarityClassification": "Classify Amazon reviews into positive or negative sentiment",
     "AmazonReviewsClassification": "Classify the given Amazon review into its appropriate rating category",
@@ -478,26 +506,38 @@ KaLM_X_task_prompts = {
     "ArxivClusteringP2P": "Identify the main and secondary category of Arxiv papers based on the titles and abstracts",
     "ArxivClusteringS2S": "Identify the main and secondary category of Arxiv papers based on the titles",
     "BiorxivClusteringP2P": "Identify the main category of Biorxiv papers based on the titles and abstracts",
+    "BiorxivClusteringP2P.v2": "Identify the main category of Biorxiv papers based on the titles and abstracts",
     "BiorxivClusteringS2S": "Identify the main category of Biorxiv papers based on the titles",
+    "BiorxivClusteringS2S.v2": "Identify the main category of Biorxiv papers based on the titles",
     "MedrxivClusteringP2P": "Identify the main category of Medrxiv papers based on the titles and abstracts",
+    "MedrxivClusteringP2P.v2": "Identify the main category of Medrxiv papers based on the titles and abstracts",
     "MedrxivClusteringS2S": "Identify the main category of Medrxiv papers based on the titles",
+    "MedrxivClusteringS2S.v2": "Identify the main category of Medrxiv papers based on the titles",
     "RedditClustering": "Identify the topic or theme of Reddit posts based on the titles",
     "RedditClusteringP2P": "Identify the topic or theme of Reddit posts based on the titles and posts",
     "StackExchangeClustering": "Identify the topic or theme of StackExchange posts based on the titles",
+    "StackExchangeClustering.v2": "Identify the topic or theme of StackExchange posts based on the titles",
     "StackExchangeClusteringP2P": "Identify the topic or theme of StackExchange posts based on the given paragraphs",
+    "StackExchangeClusteringP2P.v2": "Identify the topic or theme of StackExchange posts based on the given paragraphs",
     "TwentyNewsgroupsClustering": "Identify the topic or theme of the given news articles",
     "CLSClusteringS2S": "Identify the main category of scholar papers based on the titles",
     "CLSClusteringP2P": "Identify the main category of scholar papers based on the titles and abstracts",
+    "CLSClusteringP2P.v2": "Identify the main category of scholar papers based on the titles and abstracts",
     "ThuNewsClusteringS2S": "Identify the topic or theme of the given news articles based on the titles",
     "ThuNewsClusteringP2P": "Identify the topic or theme of the given news articles based on the titles and contents",
     "AskUbuntuDupQuestions-query": "Retrieve duplicate questions from AskUbuntu forum",
     "MindSmallReranking-query": "Retrieve relevant news articles based on user browsing history",
     "SciDocsRR-query": "Given a title of a scientific paper, retrieve the titles of other relevant papers",
     "StackOverflowDupQuestions-query": "Retrieve duplicate questions from StackOverflow forum",
+    "SprintDuplicateQuestions": "Retrieve semantically duplicate questions",
+    "TwitterSemEval2015": "Retrieve tweets that are semantically similar to the given tweet",
+    "TwitterURLCorpus": "Retrieve tweets that are semantically similar to the given tweet",
     "T2Reranking-query": "Given a Chinese search query, retrieve web passages that answer the question",
     "MMarcoReranking-query": "Given a Chinese search query, retrieve web passages that answer the question",
     "CMedQAv1-reranking-query": "Given a Chinese community medical question, retrieve replies that best answer the question",
     "CMedQAv2-reranking-query": "Given a Chinese community medical question, retrieve replies that best answer the question",
+    "Ocnli": "Retrieve semantically similar text.",
+    "Cmnli": "Retrieve semantically similar text.",
     "ArguAna-query": "Given a claim, find documents that refute the claim",
     "ArguAna-document": "Given a claim, find documents that refute the claim",
     "ClimateFEVER-query": "Given a claim about climate change, retrieve documents that support or refute the claim",
@@ -512,22 +552,26 @@ KaLM_X_task_prompts = {
     "NFCorpus-query": "Given a question, retrieve relevant documents that best answer the question",
     "NQ-query": "Given a question, retrieve Wikipedia passages that answer the question",
     "QuoraRetrieval-query": "Given a question, retrieve questions that are semantically equivalent to the given question",
-    "SCIDOCS-query": "Given a scientific paper title, retrieve paper abstracts that are cited by the given paper",
+    "SCIDOCS-query": "Given a title of a scientific paper, retrieve the titles of other relevant papers",
     "SciFact-query": "Given a scientific claim, retrieve documents that support or refute the claim",
     "Touche2020-query": "Given a question, retrieve detailed and persuasive arguments that answer the question",
     "Touche2020Retrieval.v3-query": "Given a question, retrieve detailed and persuasive arguments that answer the question",
-    "TRECCOVID-query": "Given a query on COVID-19, retrieve documents that answer the query",
+    "TRECCOVID-query": "Given a medical query, retrieve documents that answer the query",
     "T2Retrieval-query": "Given a Chinese search query, retrieve web passages that answer the question",
     "MMarcoRetrieval-query": "Given a web search query, retrieve relevant passages that answer the query",
+    "VoyageMMarcoReranking-query": "Given a Japanese search query, retrieve web passages that answer the question",
     "DuRetrieval-query": "Given a Chinese search query, retrieve web passages that answer the question",
     "CovidRetrieval-query": "Given a question on COVID-19, retrieve news articles that answer the question",
     "CmedqaRetrieval-query": "Given a Chinese community medical question, retrieve replies that best answer the question",
     "EcomRetrieval-query": "Given a user query from an e-commerce website, retrieve description sentences of relevant products",
     "MedicalRetrieval-query": "Given a medical question, retrieve user replies that best answer the question",
     "VideoRetrieval-query": "Given a video search query, retrieve the titles of relevant videos",
+    "STSBenchmarkMultilingualSTS": "Retrieve semantically similar text",
+    "SICKFr": "Retrieve semantically similar text",
+    "SummEvalFr": "Given a news summary, retrieve other semantically similar summaries",
     "MasakhaNEWSClassification": "Classify the News in the given texts into one of the seven category: politics,sports,health,business,entertainment,technology,religion ",
-    "AlloProfClusteringP2P": "Identify the main category of Allo Prof document based on the titles and descriptions",
-    "AlloProfClusteringS2S": "Identify the topic of document titles from Allo Prof dataset",
+    "OpusparcusPC": "Retrieve semantically similar text",
+    "PAWSX": "Retrieve semantically similar text",
     "HALClusteringS2S": "Identify the main category of academic passage based on the titles and contents",
     "MasakhaNEWSClusteringP2P": "Identify the topic or theme of the given news articles based on the titles and contents",
     "MasakhaNEWSClusteringS2S": "Identify the topic or theme of the given news articles based on the titles",
@@ -541,10 +585,23 @@ KaLM_X_task_prompts = {
     "XPQARetrieval-query": "Given a question, retrieve passages that answer the question",
     "MintakaRetrieval-query": "Given a question, retrieve passages that answer the question",
     "CBD": "Classify the sentiment of polish tweet reviews",
-    "PolEmo2.0-IN": "Classify the sentiment of in-domain (medicine and hotels) online reviews",
-    "PolEmo2.0-OUT": "Classify the sentiment of out-of-domain (products and school) online reviews",
+    "PolEmo2.0-IN": "Classify the sentiment of medicine and hotels online reviews",
+    "PolEmo2.0-OUT": "Classify the sentiment of products and school online reviews",
     "AllegroReviews": "Classify the sentiment of reviews from e-commerce marketplace Allegro",
-    "PAC": 'Classify the sentence into one of the two types: "BEZPIECZNE_POSTANOWIENIE_UMOWNE" and "KLAUZULA_ABUZYWNA"',
+    "PAC": 'Classify Polish contract clauses into one of the following two types: "Safe Contract Clauses" and "Unfair Contract Clauses".',
+    "SICK-E-PL": "Retrieve semantically similar text",
+    "SICK-R-PL": "Retrieve semantically similar text",
+    "STS22": "Retrieve semantically similar text",
+    "AFQMC": "Retrieve semantically similar text",
+    "BQ": "Retrieve semantically similar text",
+    "LCQMC": "Retrieve semantically similar text",
+    "QBQTC": "Retrieve semantically similar text",
+    "STS12": "Retrieve semantically similar text",
+    "PpcPC": "Retrieve semantically similar text",
+    "CDSC-E": "Retrieve semantically similar text",
+    "BornholmBitextMining": "Retrieve parallel sentences",
+    "NorwegianCourtsBitextMining": "Retrieve parallel sentences",
+    "PSC": "Retrieve semantically similar text",
     "EightTagsClustering": "Identify of headlines from social media posts in Polish into 8 categories: film, history, food, medicine, motorization, work, sport and technology",
     "ArguAna-PL-query": "Given a claim, find documents that refute the claim",
     "DBPedia-PL-query": "Given a query, retrieve relevant entity descriptions from DBPedia",
@@ -554,9 +611,9 @@ KaLM_X_task_prompts = {
     "NFCorpus-PL-query": "Given a question, retrieve relevant documents that best answer the question",
     "NQ-PL-query": "Given a question, retrieve Wikipedia passages that answer the question",
     "Quora-PL-query": "Given a question, retrieve questions that are semantically equivalent to the given question",
-    "SCIDOCS-PL-query": "Given a scientific paper title, retrieve paper abstracts that are cited by the given paper",
+    "SCIDOCS-PL-query": "Given a title of a scientific paper, retrieve the titles of other relevant papers",
     "SciFact-PL-query": "Given a scientific claim, retrieve documents that support or refute the claim",
-    "TRECCOVID-PL-query": "Given a query on COVID-19, retrieve documents that answer the query",
+    "TRECCOVID-PL-query": "Given a medical query, retrieve documents that answer the query",
     "GeoreviewClassification": "Classify the organization rating based on the reviews",
     "HeadlineClassification": "Classify the topic or theme of the given news headline",
     "InappropriatenessClassification": "Classify the given message as either sensitive topic or not",
@@ -567,9 +624,12 @@ KaLM_X_task_prompts = {
     "GeoreviewClusteringP2P": "Identify the organization category based on the reviews",
     "RuSciBenchGRNTIClusteringP2P": "Identify the category of scientific papers based on the titles and abstracts",
     "RuSciBenchOECDClusteringP2P": "Identify the category of scientific papers based on the titles and abstracts",
+    "TERRa": "Given a premise, retrieve a hypothesis that is entailed by the premise",
     "RuBQReranking-query": "Given a question, retrieve Wikipedia passages that answer the question",
     "RiaNewsRetrieval-query": "Given a headline, retrieval relevant articles",
     "RuBQRetrieval-query": "Given a question, retrieve Wikipedia passages that answer the question",
+    "RUParaPhraserSTS": "Retrieve semantically similar text",
+    "RuSTSBenchmarkSTS": "Retrieve semantically similar text",
     "AppsRetrieval-query": "Given a question about code problem, retrieval code that can solve user's problem",
     "COIRCodeSearchNetRetrieval-query": "Given a code snippet, retrieve the comment corresponding to that code.",
     "CodeEditSearchRetrieval-query": "Given a piece of code, retrieval code that in the ",
@@ -582,8 +642,19 @@ KaLM_X_task_prompts = {
     "CosQA-query": "Given a question about coding, retrieval code or passage that can solve user's question",
     "StackOverflowQA-query": "Given a question about coding, retrieval code or passage that can solve user's question",
     "SyntheticText2SQL-query": "Given a user's question, retrieve SQL queries that are appropriate responses to the question",
-    "BulgarianStoreReviewSentimentClassfication": "Classify user reviews into positive or negative sentiment",
-    "CzechProductReviewSentimentClassification": "Classify product reviews into positive or negative sentiment",
+    "BibleNLPBitextMining": "Retrieve parallel sentences",
+    "BUCC.v2": "Retrieve parallel sentences",
+    "DiaBlaBitextMining": "Retrieve parallel sentences",
+    "FloresBitextMining": "Retrieve parallel sentences",
+    "IN22GenBitextMining": "Retrieve parallel sentences",
+    "IndicGenBenchFloresBitextMining": "Retrieve parallel sentences",
+    "NollySentiBitextMining": "Retrieve parallel sentences",
+    "NTREXBitextMining": "Retrieve parallel sentences",
+    "NusaTranslationBitextMining": "Retrieve parallel sentences",
+    "NusaXBitextMining": "Retrieve parallel sentences",
+    "Tatoeba": "Retrieve parallel sentences",
+    "BulgarianStoreReviewSentimentClassfication": "Classify user reviews into positive, negative or mixed sentiment",
+    "CzechProductReviewSentimentClassification": "Classify product reviews into positive, neutral, or negative sentiment",
     "GreekLegalCodeClassification": "Given a greek legal text, classify its topic",
     "DBpediaClassification": "Given a Wikipedia articles, categorized it into classes based on its DBpedia ontology",
     "FinancialPhrasebankClassification": "Given financial news, categorized by sentiment into positive, negative, or neutral",
@@ -611,7 +682,7 @@ KaLM_X_task_prompts = {
     "PunjabiNewsClassification": "Given a news article, categorized it into two-classes",
     "SinhalaNewsClassification": "Given a news article, categorized it into political, business, technology, sports and Entertainment",
     "CSFDSKMovieReviewSentimentClassification": "Given a movie review, classify its rating on a scale from 0 to 5",
-    "SiswatiNewsClassification": "Given a news article, classify its topic",
+    "SiswatiNewsClassification": "Given a news article in Siswati, classify its topic",
     "SlovakMovieReviewSentimentClassification": "Given a movie review, categorized it into positive or negative",
     "SwahiliNewsClassification": "Given a news article, classify its domain",
     "TswanaNewsClassification": "Given a news article, classify its topic",
@@ -621,7 +692,9 @@ KaLM_X_task_prompts = {
     "ArXivHierarchicalClusteringP2P": "Identify the main and secondary category of Arxiv papers based on the titles and abstracts",
     "ArXivHierarchicalClusteringS2S": "Identify the main and secondary category of Arxiv papers based on the titles",
     "BigPatentClustering.v2": "Identify the category of documents from the Big Patent dataset",
+    "AlloProfClusteringS2S": "Identify the topic of document titles from Allo Prof dataset",
     "AlloProfClusteringS2S.v2": "Identify the topic of document titles from Allo Prof dataset",
+    "AlloProfClusteringP2P": "Identify the topic of document titles and descriptions from Allo Prof dataset",
     "HALClusteringS2S.v2": "Identify the topic of titles from HAL",
     "SIB200ClusteringS2S": "Identify the category of documents",
     "WikiClusteringP2P.v2": "Identify the category of wiki passages",
@@ -629,26 +702,60 @@ KaLM_X_task_prompts = {
     "KorHateSpeechMLClassification": "Given a Korean online news comments, classify its fine-grained hate speech classes",
     "MalteseNewsClassification": "Given a maltese new, classify its topic",
     "MultiEURLEXMultilabelClassification": "Given a text, classify its topic",
-    "BrazilianToxicTweetsClassification": "Given a tweet, classify its topic",
+    "BrazilianToxicTweetsClassification": "Classify the toxic tweets in Brazilian Portuguese into one of the six categories: LGBTQ+phobia, Xenophobia, Obscene, Insult, Misogyny and Racism.",
+    "CTKFactsNLI": "Retrieve semantically similar text",
+    "indonli": "Retrieve semantically similar text",
+    "ArmenianParaphrasePC": "Retrieve semantically similar text",
+    "PawsXPairClassification": "Retrieve semantically similar text",
+    "RTE3": "Retrieve semantically similar text",
+    "XNLI": "Retrieve semantically similar text",
+    "GermanSTSBenchmark": "Retrieve semantically similar text",
+    "SICK-R": "Retrieve semantically similar text",
+    "STS13": "Retrieve semantically similar text",
+    "STS14": "Retrieve semantically similar text",
+    "STSBenchmark": "Retrieve semantically similar text",
+    "FaroeseSTS": "Retrieve semantically similar text",
+    "FinParaSTS": "Retrieve semantically similar text",
+    "JSICK": "Retrieve semantically similar text",
+    "IndicCrosslingualSTS": "Retrieve parallel sentences",
+    "SemRel24STS": "Retrieve semantically similar text",
+    "STS17": "Retrieve semantically similar text",
+    "STS22.v2": "Retrieve semantically similar text",
+    "STSES": "Retrieve semantically similar text",
+    "STSB": "Retrieve semantically similar text",
     "AILAStatutes-query": "Identifying the most relevant statutes for a given situation",
-    "HagridRetrieval-query": "Retrieval the relevant passage for the given query",
-    "LegalBenchCorporateLobbying-query": "Retrieval the relevant passage for the given query",
+    "HagridRetrieval-query": "Given an information-seeking question, retrieve the best replies to answer the question",
+    "LegalBenchCorporateLobbying-query": "Given a query, retrieve relevant legal bill summaries",
     "LEMBPasskeyRetrieval-query": "Retrieval the relevant passage for the given query",
     "BelebeleRetrieval-query": "Retrieval the relevant passage for the given query",
     "MLQARetrieval-query": "Retrieval the relevant passage for the given query",
     "StatcanDialogueDatasetRetrieval-query": "Retrieval the relevant passage for the given query",
     "WikipediaRetrievalMultilingual-query": "Retrieval the relevant passage for the given query",
-    "Core17InstructionRetrieval-query": "Retrieval the relevant passage for the given query",
-    "News21InstructionRetrieval-query": "Retrieval the relevant passage for the given query",
-    "Robust04InstructionRetrieval-query": "Retrieval the relevant passage for the given query",
+    "Core17InstructionRetrieval-query": "Retrieval the relevant passage for the given query with conditions",
+    "News21InstructionRetrieval-query": "Retrieval the relevant passage for the given query with conditions",
+    "Robust04InstructionRetrieval-query": "Retrieval the relevant passage for the given query with conditions",
     "WebLINXCandidatesReranking-query": "Retrieval the relevant passage for the given query",
     "WikipediaRerankingMultilingual-query": "Retrieval the relevant passage for the given query",
+    "STS15": "Retrieve semantically similar text",
     "MIRACLRetrievalHardNegatives-query": "Retrieval relevant passage for the given query",
+    "BIOSSES": "Retrieve semantically similar text",
     "CQADupstackRetrieval-query": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
     "CQADupstackGamingRetrieval-query": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
     "CQADupstackGamingRetrieval-document": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
     "CQADupstackUnixRetrieval-query": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
     "CQADupstackUnixRetrieval-document": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
+    "STS16": "Retrieve semantically similar text",
+    "SummEval": "Retrieve semantically similar text",
+    "ATEC": "Retrieve semantically similar text",
+    "ScalaClassification": "Classify passages into correct or correct in Scandinavian Languages based on linguistic acceptability",
+    "SpartQA-query": "Given the following spatial reasoning question, retrieve the right answer.",
+    "CEDRClassification": "Given a comment as query, classify expressed emotions into joy, sadness, surprise, fear, and anger",
+    "DalajClassification": "Classify texts based on linguistic acceptability in Swedish",
+    "TempReasonL1-query": "Given the following question about time, retrieve the correct answer.",
+    "WinoGrande-query": "Given the following sentence, retrieve an appropriate answer to fill in the missing underscored part.",
+    "NordicLangClassification": "Classify texts based on language",
+    "TwitterHjerneRetrieval-query": "Retrieve answers to questions asked in Danish tweets",
+    "SwednClusteringP2P": "Identify news categories in Swedish passages",
 }
 
 KaLM_INSTRUCTION = "Instruct: {instruction} \n Query: "
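The `KaLM_INSTRUCTION` template above is filled with the per-task prompt to build each query prefix. A minimal sketch of that assembly, the exact wiring inside `InstructSentenceTransformerModel` may differ:

```python
KaLM_INSTRUCTION = "Instruct: {instruction} \n Query: "
task_prompts = {
    "AmazonPolarityClassification": "Classify Amazon reviews into positive or negative sentiment",
}

def format_query(task_name: str, text: str) -> str:
    # Fill the task-specific instruction into the template, then append the text.
    return KaLM_INSTRUCTION.format(instruction=task_prompts[task_name]) + text

print(format_query("AmazonPolarityClassification", "Great battery life!"))
# Instruct: Classify Amazon reviews into positive or negative sentiment
#  Query: Great battery life!
```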
@@ -795,3 +902,30 @@ KaLM_Embedding_KaLM_embedding_multilingual_mini_instruct_v2_5 = ModelMeta(
     adapted_from="HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v2",
     superseded_by=None,
 )
+
+KaLM_Embedding_gemma_3_12b_2511 = ModelMeta(
+    loader=InstructSentenceTransformerModel,
+    loader_kwargs=dict(
+        instruction_template=KaLM_INSTRUCTION,
+        max_seq_length=512,
+        apply_instruction_to_passages=True,
+        prompts_dict=KaLM_Embedding_gemma_3_12b_task_prompts,
+    ),
+    name="tencent/KaLM-Embedding-Gemma3-12B-2511",
+    revision="edf22f4753f58b05e3f5495818d31f12db63056d",
+    languages=None,
+    open_weights=True,
+    release_date="2025-11-06",
+    n_parameters=11.76 * 1e9,
+    memory_usage_mb=44884,
+    max_tokens=32768,
+    embed_dim=3840,
+    license=None,
+    reference="https://kalm-embedding.github.io/",
+    similarity_fn_name="cosine",
+    framework=["Sentence Transformers", "PyTorch"],
+    use_instructions=True,
+    public_training_code="https://github.com/HITsz-TMG/KaLM-Embedding",
+    public_training_data=None,
+    training_datasets=KaLM_Embedding_gemma_3_12b_training_data,
+)
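Once registered, the new metadata is reachable through mteb's model registry. A hedged sketch (`get_model_meta.py` appears in this wheel's RECORD below; call signatures may vary between mteb versions):

```python
import mteb

# Inspect the registered metadata without downloading any weights.
meta = mteb.get_model_meta("tencent/KaLM-Embedding-Gemma3-12B-2511")
print(meta.n_parameters, meta.embed_dim, meta.max_tokens)

# Instantiating the model itself fetches the checkpoint.
model = mteb.get_model("tencent/KaLM-Embedding-Gemma3-12B-2511")
```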
--- /dev/null
+++ b/mteb/models/model_implementations/rasgaard_models.py
@@ -0,0 +1,27 @@
+import numpy as np
+
+from mteb.models.model_implementations.model2vec_models import Model2VecModel
+from mteb.models.model_meta import ModelMeta, ScoringFunction
+
+potion_base_8m = ModelMeta(
+    loader=Model2VecModel,  # type: ignore
+    name="rasgaard/m2v-dfm-large",
+    languages=["dan-Latn"],
+    open_weights=True,
+    revision="387897cfb09992e6d45ea9cd7b28b9fcf119e23a",
+    release_date="2025-10-08",
+    n_parameters=22893312,
+    memory_usage_mb=87,
+    max_tokens=np.inf,
+    embed_dim=256,
+    license="mit",
+    similarity_fn_name=ScoringFunction.COSINE,
+    framework=["NumPy", "Sentence Transformers"],
+    reference="https://huggingface.co/minishlab/rasgaard/m2v-dfm-large",
+    use_instructions=False,
+    adapted_from="KennethEnevoldsen/dfm-sentence-encoder-large",
+    superseded_by=None,
+    training_datasets=set(),  # distilled
+    public_training_code="https://github.com/MinishLab/model2vec",
+    public_training_data="https://huggingface.co/datasets/HuggingFaceFW/fineweb-2",  # distilled on this
+)
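The entry registers a Model2Vec distillation: static token embeddings that are looked up and pooled, with no transformer forward pass (hence `max_tokens=np.inf`). A hedged sketch of using it directly via the model2vec package linked in `public_training_code`:

```python
from model2vec import StaticModel

# Static embeddings: pure lookup-and-pool, no attention layers at inference.
model = StaticModel.from_pretrained("rasgaard/m2v-dfm-large")
embeddings = model.encode(["Det her er en dansk sætning."])
print(embeddings.shape)  # expected (1, 256), matching the embed_dim above
```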
--- /dev/null
+++ b/mteb/models/model_implementations/tarka_models.py
@@ -0,0 +1,58 @@
+from mteb.models.model_implementations.google_models import gemma_embedding_loader
+from mteb.models.model_meta import ModelMeta
+
+Tarka_Embedding_150M_V1_CITATION = """@misc{tarka_ai_research_2025,
+    author = { Tarka AI Research },
+    title = { Tarka-Embedding-150M-V1 (Revision c5f4f43) },
+    year = 2025,
+    url = { https://huggingface.co/Tarka-AIR/Tarka-Embedding-150M-V1 },
+    doi = { 10.57967/hf/6875 },
+    publisher = { Hugging Face }
+}"""
+
+MULTILINGUAL_EVALUATED_LANGUAGES = [
+    "arb-Arab",
+    "eng-Latn",
+    "spa-Latn",
+    "deu-Latn",
+    "fra-Latn",
+    "jpn-Jpan",
+    "kor-Hang",
+    "zho-Hans",
+]
+
+training_data = {
+    "T2Retrieval",
+    "DuRetrieval",
+    "MMarcoReranking",
+    "CMedQAv2-reranking",
+    "NQ",
+    "MSMARCO",
+    "HotpotQA",
+    "FEVER",
+    "MrTidyRetrieval",
+    "MIRACLRetrieval",
+    "CodeSearchNet",
+}
+
+tarka_embedding_150m_v1 = ModelMeta(
+    loader=gemma_embedding_loader,
+    name="Tarka-AIR/Tarka-Embedding-150M-V1",
+    languages=MULTILINGUAL_EVALUATED_LANGUAGES,
+    open_weights=True,
+    revision="b0ffecc4ef0d873e517507ed080e43b88b2704b9",
+    release_date="2025-11-04",
+    n_parameters=155_714_304,
+    embed_dim=768,
+    max_tokens=2048,
+    license="gemma",
+    reference="https://huggingface.co/Tarka-AIR/Tarka-Embedding-150M-V1",
+    framework=["Sentence Transformers", "PyTorch"],
+    use_instructions=True,
+    public_training_code=None,
+    public_training_data=None,
+    training_datasets=training_data,
+    similarity_fn_name="cosine",
+    memory_usage_mb=576,
+    citation=Tarka_Embedding_150M_V1_CITATION,
+)
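Since the entry lists `framework=["Sentence Transformers", "PyTorch"]`, the checkpoint can presumably also be loaded directly with sentence-transformers. A hedged sketch; any instruction prefixes applied by `gemma_embedding_loader` are not reproduced here:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("Tarka-AIR/Tarka-Embedding-150M-V1")
embeddings = model.encode(["What is late-interaction retrieval?"])
print(embeddings.shape)  # expected (1, 768) per the embed_dim above
```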
--- a/mteb/models/search_wrappers.py
+++ b/mteb/models/search_wrappers.py
@@ -191,6 +191,7 @@ class SearchEncoderWrapper:
         cos_scores_top_k_idx = cos_scores_top_k_idx.cpu().tolist()
         cos_scores_top_k_values = cos_scores_top_k_values.cpu().tolist()
 
+        sub_corpus_ids = list(sub_corpus_ids)
         for query_itr in range(len(query_embeddings)):
            query_id = query_idx_to_id[query_itr]
             for sub_corpus_id, score in zip(
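The one-line fix materializes `sub_corpus_ids` before the per-query loop. If it were a one-shot iterable (e.g. a generator or map object, which is an assumption here), the first query's `zip` would exhaust it and every later query would silently get zero results. In miniature:

```python
# A one-shot iterator is consumed by the first zip ...
ids_iter = iter(["d1", "d2", "d3"])
print(list(zip(ids_iter, [0.9, 0.8, 0.7])))  # [('d1', 0.9), ('d2', 0.8), ('d3', 0.7)]
print(list(zip(ids_iter, [0.9, 0.8, 0.7])))  # [] -- already exhausted

# ... whereas a materialized list can be re-zipped on every pass.
ids = ["d1", "d2", "d3"]
print(list(zip(ids, [0.9, 0.8, 0.7])))  # works for every query
```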
--- a/mteb-2.1.4.dist-info/METADATA
+++ b/mteb-2.1.6.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mteb
-Version: 2.1.4
+Version: 2.1.6
 Summary: Massive Text Embedding Benchmark
 Author-email: MTEB Contributors <niklas@huggingface.co>, Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Nouamane Tazi <nouamane@huggingface.co>, Nils Reimers <info@nils-reimers.de>
 Maintainer-email: Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Roman Solomatin <risolomatin@gmail.com>, Isaac Chung <chungisaac1217@gmail.com>
@@ -16,7 +16,7 @@ Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Information Technology
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
-Requires-Python: <3.14,>=3.10
+Requires-Python: <3.15,>=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: datasets>=2.19.0
--- a/mteb-2.1.4.dist-info/RECORD
+++ b/mteb-2.1.6.dist-info/RECORD
@@ -1419,7 +1419,7 @@ mteb/models/get_model_meta.py,sha256=VpZZNINk-QrNeVpPZnlqzlLhtBs8G84eRwTzAb_gRD4
 mteb/models/instruct_wrapper.py,sha256=HxHmnlxkjtZhfgTZRYJBT3Nma7Dhx6a9e2Bg-cO_IYs,8844
 mteb/models/model_meta.py,sha256=b-Nel9nX5bJk4cgJnqkBzEKyMY7uXvxlCBSxmmH1Ios,14769
 mteb/models/models_protocols.py,sha256=D2hYWn_UBGMaKtRwBx3u0B0ni6lHJjSzTxX21XFNwIc,8917
-mteb/models/search_wrappers.py,sha256=0McxwGnqyiYKPHjHsxWZp1pP9qGuHemZjeX1z5ZgNAI,15588
+mteb/models/search_wrappers.py,sha256=W99EeMDQ58N0auVp2-7T39orG7GZLekSsVCdxEZLxw0,15638
 mteb/models/sentence_transformer_wrapper.py,sha256=n5CMsM6Lpg_CFHH0NkpJusMsaLUTt-L9vRmFINQ961k,12338
 mteb/models/cache_wrappers/__init__.py,sha256=j3JBHN73Tr7uMUO92FEvKXstnybxrPpGWmKXU2lAoIE,88
 mteb/models/cache_wrappers/cache_backend_protocol.py,sha256=TR7kD7KbN1J4piszIecpegtLZYGy7sRHZt3SDWlImKk,1665
@@ -1448,7 +1448,7 @@ mteb/models/model_implementations/codesage_models.py,sha256=fy0_WYcU4DnYH9DQ1C5F
 mteb/models/model_implementations/cohere_models.py,sha256=H7Mjn57kmeBhIaJx6riaCEEgZS01YBWBBGPagEU87ZQ,13690
 mteb/models/model_implementations/cohere_v.py,sha256=K6VEw1NkyM2PuMd18kHE6aqPrcByYSwEmAKjvLods_w,15760
 mteb/models/model_implementations/colpali_models.py,sha256=uVmK3jXO-GDn-7i6cJFWdc0u0-MU3INHHL1rXUPhBec,8944
-mteb/models/model_implementations/colqwen_models.py,sha256=G_yNAQRI9Evf1x2qsLHOZX687iYy4WIsnlR0U-jHJzo,6804
+mteb/models/model_implementations/colqwen_models.py,sha256=Y6IBhYKbxjkC3AePa0l37-F50xcX1mtPCXpgW597HyA,8754
 mteb/models/model_implementations/colsmol_models.py,sha256=O2M7Ksydh94M_Iax4KytHb-wOL18N0BIYLKSsLF8BFs,2967
 mteb/models/model_implementations/conan_models.py,sha256=G-s7xo9VtNX-f7lWKtYVGHHiMMN0Xp44PlNIp7E0LAo,6502
 mteb/models/model_implementations/dino_models.py,sha256=QFgaFHR5YKrylqJGSljXCBn2W7qHhmF6KdXkvHrQNEI,16380
@@ -1471,7 +1471,7 @@ mteb/models/model_implementations/inf_models.py,sha256=lvXUFhAYDltq2_Xa9MHcwfhh1
 mteb/models/model_implementations/jasper_models.py,sha256=KzjVnQ1HwaVO9Z7kk1ZkjFrhvlKupeWCmkSljnZv-IM,4071
 mteb/models/model_implementations/jina_clip.py,sha256=CfiIxbhKspjQajNtObCfGPHOWPk6uLn4cuwydQHFTMo,5118
 mteb/models/model_implementations/jina_models.py,sha256=QWoesiTygdFTLcdGpdx26wOUI1AXRz3jLmxGHJ0WMNE,29919
-mteb/models/model_implementations/kalm_models.py,sha256=ruNzMk3MRLW0up91aXwPIMqorRyujDF291B78D_m7bM,52827
+mteb/models/model_implementations/kalm_models.py,sha256=FmW7Z5Qs6WYBLuKvql3u4IJW36kj4k-Ypah8qTBEBkg,59837
 mteb/models/model_implementations/lens_models.py,sha256=fC7_NB1F8vBAlXD0p0-hALf6eZTPFJwpz57dy71OlwI,1696
 mteb/models/model_implementations/lgai_embedding_models.py,sha256=S83pbfkMH3YUNl4skusgbK-Rn-uLuScQVxgXwegR_N4,2333
 mteb/models/model_implementations/linq_models.py,sha256=rnW27MybLMQ2Y3OxDyBTMSIsx_hXC0DlMD4kFv7NJV0,1918
@@ -1505,6 +1505,7 @@ mteb/models/model_implementations/qtack_models.py,sha256=biZLH5E3UWIcMZXIZNGgBZF
 mteb/models/model_implementations/qwen3_models.py,sha256=IpPJC_y-Kt_1Bq6nT2lX3-lMl-hl0BV6VhAL8Czfyfg,5133
 mteb/models/model_implementations/qzhou_models.py,sha256=90tsGftdCX90Gvu63bxtMyzczGi_u9bsUlg4WAVeH9Q,3536
 mteb/models/model_implementations/random_baseline.py,sha256=NH-epZ73BYvV6qYgu09BsoiFxBWypUNLdJIXRp5QPhM,8030
+mteb/models/model_implementations/rasgaard_models.py,sha256=fHMXP4S17fZQ93n2KkUsAoxyGmVyCNexz6ek6c4zO0E,1021
 mteb/models/model_implementations/reasonir_model.py,sha256=wSCcJpUgZ0pG2g3vTEzYNmPlPG_CVn_rR0ENVCines0,2218
 mteb/models/model_implementations/repllama_models.py,sha256=89HoqEpzkNysHeuf_-YhU8WETamHTogSRztGIRo6G1s,7321
 mteb/models/model_implementations/rerankers_custom.py,sha256=ro73A9-hHudy3_qIMrhP-ja-3Xqu78r_aORm856zHQc,10651
@@ -1521,6 +1522,7 @@ mteb/models/model_implementations/shuu_model.py,sha256=KkcuVYjIzoha3Fvxh8ppqHQ9B
 mteb/models/model_implementations/siglip_models.py,sha256=tvi8QB2ayBoeXsxwHrl5RFlkknvE6FM9N06zSBWGQD0,12602
 mteb/models/model_implementations/sonar_models.py,sha256=Nc6kAJRWSrxA57DPRrgOPHqS1dNhz2vsE_1ZA2JtigQ,4784
 mteb/models/model_implementations/stella_models.py,sha256=NL3tk-rnuBdznsQ-nmelqun4tFO2xKoNPPOOVKqnPGU,8062
+mteb/models/model_implementations/tarka_models.py,sha256=D3b8lPMuSZTKBCDqL5ozI_5lf5og0qKJQD4HBhXyi7o,1601
 mteb/models/model_implementations/text2vec_models.py,sha256=zaHWRc2W0RYZAOetinqRzug9UGW0HmY5U-jYsLXA8wo,4160
 mteb/models/model_implementations/ua_sentence_models.py,sha256=fcvXR4-Rrt-UDTlDkh2ZAO1gO_ufCOHiT6EhoeKiHx8,1224
 mteb/models/model_implementations/uae_models.py,sha256=KZxH5a3t-sfh33xUBkLizEuyFAyPlGfnRsn-S7mjq74,3112
@@ -2536,9 +2538,9 @@ mteb/types/_metadata.py,sha256=NN-W0S6a5TDV7UkpRx1pyWtGF4TyyCyoPUfHOwdeci8,2290
 mteb/types/_result.py,sha256=CRAUc5IvqI3_9SyXDwv-PWLCXwXdZem9RePeYESRtuw,996
 mteb/types/_string_validators.py,sha256=PY-dYq4E8O50VS3bLYdldPWp400fl_WzUjfVSkNWe8U,523
 mteb/types/statistics.py,sha256=YwJsxTf1eaCI_RE-J37a-gK5wDeGAsmkeZKoZCFihSo,3755
-mteb-2.1.4.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-mteb-2.1.4.dist-info/METADATA,sha256=3HJ0LwqN5B9zLnqA4j5PsmVknk75iAHG-qwtZqJdKW0,13573
-mteb-2.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-mteb-2.1.4.dist-info/entry_points.txt,sha256=8IJoEJFKoDHmVnNev-qJ9pp4Ln7_1-ma9QsXnzVCzGU,39
-mteb-2.1.4.dist-info/top_level.txt,sha256=OLVIjcQAlWBz0bdmutKlWHLF42FF0hp4uVAg3ZyiG4U,5
-mteb-2.1.4.dist-info/RECORD,,
+mteb-2.1.6.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mteb-2.1.6.dist-info/METADATA,sha256=hB6t2OTnS5La-oqlUBIi67CI21acqqhYkdVSnXLriGU,13573
+mteb-2.1.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mteb-2.1.6.dist-info/entry_points.txt,sha256=8IJoEJFKoDHmVnNev-qJ9pp4Ln7_1-ma9QsXnzVCzGU,39
+mteb-2.1.6.dist-info/top_level.txt,sha256=OLVIjcQAlWBz0bdmutKlWHLF42FF0hp4uVAg3ZyiG4U,5
+mteb-2.1.6.dist-info/RECORD,,
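For reference, each RECORD row is `path,hash,size-in-bytes`: the hash is the PEP 376/427-style urlsafe base64 of the raw sha256 digest with `=` padding stripped, which is why `search_wrappers.py` growing from 15588 to 15638 bytes (consistent with the one added line above) also changes its digest. A hedged sketch of how such a hash is computed:

```python
import base64
import hashlib

def wheel_record_hash(data: bytes) -> str:
    """RECORD-style hash: urlsafe base64 of the raw sha256 digest, padding stripped."""
    digest = hashlib.sha256(data).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

print(wheel_record_hash(b"example file contents"))
```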