mteb 2.1.4__py3-none-any.whl → 2.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mteb/__init__.py +4 -0
- mteb/_create_dataloaders.py +6 -3
- mteb/_evaluators/any_sts_evaluator.py +21 -12
- mteb/_evaluators/classification_metrics.py +54 -0
- mteb/_evaluators/clustering_evaluator.py +1 -1
- mteb/_evaluators/image/imagetext_pairclassification_evaluator.py +9 -4
- mteb/_evaluators/pair_classification_evaluator.py +30 -38
- mteb/_evaluators/sklearn_evaluator.py +15 -28
- mteb/_evaluators/text/bitext_mining_evaluator.py +4 -1
- mteb/_evaluators/text/summarization_evaluator.py +4 -2
- mteb/_evaluators/zeroshot_classification_evaluator.py +2 -2
- mteb/abstasks/_data_filter/__init__.py +0 -0
- mteb/abstasks/_data_filter/filters.py +125 -0
- mteb/abstasks/_data_filter/task_pipelines.py +102 -0
- mteb/abstasks/_statistics_calculation.py +6 -2
- mteb/abstasks/classification.py +0 -2
- mteb/abstasks/clustering.py +1 -1
- mteb/abstasks/clustering_legacy.py +3 -0
- mteb/abstasks/multilabel_classification.py +10 -3
- mteb/abstasks/pair_classification.py +8 -1
- mteb/abstasks/sts.py +7 -0
- mteb/abstasks/task_metadata.py +1 -0
- mteb/benchmarks/_create_table.py +84 -37
- mteb/benchmarks/benchmark.py +74 -15
- mteb/benchmarks/benchmarks/__init__.py +8 -0
- mteb/benchmarks/benchmarks/benchmarks.py +259 -15
- mteb/benchmarks/get_benchmark.py +2 -0
- mteb/cache.py +47 -10
- mteb/deprecated_evaluator.py +8 -13
- mteb/descriptive_stats/BitextMining/RuSciBenchBitextMining.v2.json +61 -0
- mteb/descriptive_stats/Classification/HebrewSentimentAnalysis.v3.json +60 -0
- mteb/descriptive_stats/Classification/TurkishConstitutionalCourtViolation.json +54 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3ComputerScienceRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3EnergyRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3FinanceEnRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3FinanceFrRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3HrRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3IndustrialRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3NuclearRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3PharmaceuticalsRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3PhysicsRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3TelecomRetrieval.json +214 -0
- mteb/descriptive_stats/PairClassification/TERRa.V2.json +35 -0
- mteb/descriptive_stats/Reranking/JQaRARerankingLite.json +35 -0
- mteb/descriptive_stats/Reranking/JaCWIRRerankingLite.json +35 -0
- mteb/descriptive_stats/Reranking/MultiLongDocReranking.json +466 -0
- mteb/descriptive_stats/Retrieval/ArguAna-NL.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/JaCWIRRetrievalLite.json +30 -0
- mteb/descriptive_stats/Retrieval/JaqketRetrievalLite.json +30 -0
- mteb/descriptive_stats/Retrieval/MIRACLJaRetrievalLite.json +30 -0
- mteb/descriptive_stats/Retrieval/MrTyDiJaRetrievalLite.json +30 -0
- mteb/descriptive_stats/Retrieval/NFCorpus-NL.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/SCIDOCS-NL.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/SQuADKorV1Retrieval.json +30 -0
- mteb/descriptive_stats/Retrieval/SciFact-NL.v2.json +30 -0
- mteb/evaluate.py +65 -45
- mteb/leaderboard/app.py +268 -133
- mteb/leaderboard/benchmark_selector.py +14 -5
- mteb/leaderboard/figures.py +13 -15
- mteb/leaderboard/table.py +82 -17
- mteb/models/__init__.py +4 -1
- mteb/models/abs_encoder.py +21 -17
- mteb/models/cache_wrappers/__init__.py +2 -1
- mteb/models/cache_wrappers/cache_backends/_hash_utils.py +2 -2
- mteb/models/cache_wrappers/cache_wrapper.py +1 -1
- mteb/models/get_model_meta.py +3 -114
- mteb/models/instruct_wrapper.py +5 -1
- mteb/models/model_implementations/align_models.py +7 -0
- mteb/models/model_implementations/amazon_models.py +1 -0
- mteb/models/model_implementations/andersborges.py +65 -0
- mteb/models/model_implementations/ara_models.py +8 -0
- mteb/models/model_implementations/arctic_models.py +8 -0
- mteb/models/model_implementations/b1ade_models.py +1 -0
- mteb/models/model_implementations/bedrock_models.py +4 -0
- mteb/models/model_implementations/bge_models.py +60 -0
- mteb/models/model_implementations/bica_model.py +35 -0
- mteb/models/model_implementations/blip2_models.py +11 -0
- mteb/models/model_implementations/blip_models.py +27 -0
- mteb/models/model_implementations/bm25.py +1 -0
- mteb/models/model_implementations/bmretriever_models.py +4 -0
- mteb/models/model_implementations/cadet_models.py +9 -0
- mteb/models/model_implementations/cde_models.py +14 -0
- mteb/models/model_implementations/clip_models.py +3 -0
- mteb/models/model_implementations/clips_models.py +100 -0
- mteb/models/model_implementations/codefuse_models.py +162 -0
- mteb/models/model_implementations/codesage_models.py +15 -0
- mteb/models/model_implementations/cohere_models.py +8 -1
- mteb/models/model_implementations/cohere_v.py +5 -0
- mteb/models/model_implementations/colpali_models.py +14 -6
- mteb/models/model_implementations/colqwen_models.py +271 -1
- mteb/models/model_implementations/colsmol_models.py +2 -0
- mteb/models/model_implementations/conan_models.py +1 -0
- mteb/models/model_implementations/dino_models.py +171 -0
- mteb/models/model_implementations/e5_instruct.py +4 -0
- mteb/models/model_implementations/e5_models.py +12 -101
- mteb/models/model_implementations/e5_v.py +1 -0
- mteb/models/model_implementations/eagerworks_models.py +164 -0
- mteb/models/model_implementations/emillykkejensen_models.py +91 -0
- mteb/models/model_implementations/en_code_retriever.py +1 -0
- mteb/models/model_implementations/euler_models.py +32 -0
- mteb/models/model_implementations/evaclip_models.py +4 -0
- mteb/models/model_implementations/fa_models.py +58 -0
- mteb/models/model_implementations/facebookai.py +193 -0
- mteb/models/model_implementations/geogpt_models.py +1 -0
- mteb/models/model_implementations/gme_v_models.py +11 -5
- mteb/models/model_implementations/google_models.py +16 -5
- mteb/models/model_implementations/granite_vision_embedding_models.py +7 -2
- mteb/models/model_implementations/gritlm_models.py +2 -0
- mteb/models/model_implementations/gte_models.py +78 -0
- mteb/models/model_implementations/hinvec_models.py +1 -0
- mteb/models/model_implementations/human.py +1 -0
- mteb/models/model_implementations/ibm_granite_models.py +6 -0
- mteb/models/model_implementations/inf_models.py +2 -0
- mteb/models/model_implementations/jasper_models.py +255 -2
- mteb/models/model_implementations/jina_clip.py +1 -0
- mteb/models/model_implementations/jina_models.py +209 -5
- mteb/models/model_implementations/kalm_models.py +203 -25
- mteb/models/model_implementations/kblab.py +31 -0
- mteb/models/model_implementations/kennethenevoldsen_models.py +74 -0
- mteb/models/model_implementations/kfst.py +25 -0
- mteb/models/model_implementations/kowshik24_models.py +32 -0
- mteb/models/model_implementations/lens_models.py +2 -0
- mteb/models/model_implementations/lgai_embedding_models.py +1 -0
- mteb/models/model_implementations/linq_models.py +3 -2
- mteb/models/model_implementations/listconranker.py +1 -1
- mteb/models/model_implementations/llm2clip_models.py +3 -0
- mteb/models/model_implementations/llm2vec_models.py +8 -0
- mteb/models/model_implementations/mcinext_models.py +3 -0
- mteb/models/model_implementations/mdbr_models.py +2 -0
- mteb/models/model_implementations/misc_models.py +362 -0
- mteb/models/model_implementations/mme5_models.py +1 -0
- mteb/models/model_implementations/moco_models.py +11 -0
- mteb/models/model_implementations/mod_models.py +191 -0
- mteb/models/model_implementations/model2vec_models.py +13 -0
- mteb/models/model_implementations/moka_models.py +3 -0
- mteb/models/model_implementations/mxbai_models.py +9 -0
- mteb/models/model_implementations/nbailab.py +70 -0
- mteb/models/model_implementations/no_instruct_sentence_models.py +1 -0
- mteb/models/model_implementations/nomic_models.py +156 -4
- mteb/models/model_implementations/nomic_models_vision.py +7 -2
- mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py +23 -16
- mteb/models/model_implementations/nvidia_models.py +4 -1
- mteb/models/model_implementations/octen_models.py +195 -0
- mteb/models/model_implementations/openai_models.py +20 -16
- mteb/models/model_implementations/openclip_models.py +24 -0
- mteb/models/model_implementations/opensearch_neural_sparse_models.py +5 -0
- mteb/models/model_implementations/ops_moa_models.py +4 -2
- mteb/models/model_implementations/pawan_models.py +39 -0
- mteb/models/model_implementations/piccolo_models.py +8 -0
- mteb/models/model_implementations/promptriever_models.py +8 -4
- mteb/models/model_implementations/pylate_models.py +37 -4
- mteb/models/model_implementations/qodo_models.py +2 -0
- mteb/models/model_implementations/qtack_models.py +1 -0
- mteb/models/model_implementations/qwen3_models.py +6 -3
- mteb/models/model_implementations/qzhou_models.py +3 -1
- mteb/models/model_implementations/random_baseline.py +16 -21
- mteb/models/model_implementations/rasgaard_models.py +34 -0
- mteb/models/model_implementations/reasonir_model.py +1 -0
- mteb/models/model_implementations/repllama_models.py +2 -0
- mteb/models/model_implementations/rerankers_custom.py +3 -3
- mteb/models/model_implementations/rerankers_monot5_based.py +14 -14
- mteb/models/model_implementations/richinfoai_models.py +1 -0
- mteb/models/model_implementations/ru_sentence_models.py +51 -0
- mteb/models/model_implementations/ruri_models.py +322 -0
- mteb/models/model_implementations/salesforce_models.py +3 -0
- mteb/models/model_implementations/samilpwc_models.py +1 -0
- mteb/models/model_implementations/sarashina_embedding_models.py +168 -0
- mteb/models/model_implementations/searchmap_models.py +1 -0
- mteb/models/model_implementations/seed_1_6_embedding_models.py +8 -2
- mteb/models/model_implementations/seed_1_6_embedding_models_1215.py +658 -0
- mteb/models/model_implementations/seed_models.py +1 -0
- mteb/models/model_implementations/sentence_transformers_models.py +57 -0
- mteb/models/model_implementations/shuu_model.py +32 -31
- mteb/models/model_implementations/siglip_models.py +10 -0
- mteb/models/model_implementations/sonar_models.py +1 -0
- mteb/models/model_implementations/spartan8806_atles_champion.py +34 -0
- mteb/models/model_implementations/stella_models.py +6 -0
- mteb/models/model_implementations/tarka_models.py +376 -0
- mteb/models/model_implementations/ua_sentence_models.py +10 -0
- mteb/models/model_implementations/uae_models.py +1 -0
- mteb/models/model_implementations/vdr_models.py +2 -0
- mteb/models/model_implementations/vi_vn_models.py +39 -0
- mteb/models/model_implementations/vista_models.py +2 -0
- mteb/models/model_implementations/vlm2vec_models.py +2 -0
- mteb/models/model_implementations/voyage_models.py +15 -0
- mteb/models/model_implementations/voyage_v.py +8 -2
- mteb/models/model_implementations/xyz_models.py +1 -0
- mteb/models/model_implementations/youtu_models.py +1 -0
- mteb/models/model_implementations/yuan_models.py +34 -0
- mteb/models/model_implementations/yuan_models_en.py +58 -0
- mteb/models/model_meta.py +442 -22
- mteb/models/search_encoder_index/__init__.py +7 -0
- mteb/models/search_encoder_index/search_backend_protocol.py +50 -0
- mteb/models/search_encoder_index/search_indexes/__init__.py +5 -0
- mteb/models/search_encoder_index/search_indexes/faiss_search_index.py +157 -0
- mteb/models/search_wrappers.py +165 -48
- mteb/models/sentence_transformer_wrapper.py +2 -7
- mteb/results/benchmark_results.py +88 -47
- mteb/results/model_result.py +11 -4
- mteb/results/task_result.py +37 -19
- mteb/similarity_functions.py +49 -0
- mteb/tasks/bitext_mining/multilingual/__init__.py +2 -1
- mteb/tasks/bitext_mining/multilingual/bucc_bitext_mining.py +4 -2
- mteb/tasks/bitext_mining/multilingual/bucc_bitext_mining_fast.py +1 -1
- mteb/tasks/bitext_mining/multilingual/ru_sci_bench_bitext_mining.py +47 -5
- mteb/tasks/bitext_mining/multilingual/web_faq_bitext_mining.py +2 -6
- mteb/tasks/classification/ara/ajgt.py +1 -2
- mteb/tasks/classification/ara/hotel_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ara/online_store_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ara/restaurant_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ara/tweet_emotion_classification.py +1 -2
- mteb/tasks/classification/ara/tweet_sarcasm_classification.py +1 -2
- mteb/tasks/classification/ben/bengali_document_classification.py +1 -2
- mteb/tasks/classification/ben/bengali_hate_speech_classification.py +1 -2
- mteb/tasks/classification/ben/bengali_sentiment_analysis.py +1 -2
- mteb/tasks/classification/ces/csfdcz_movie_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ces/czech_product_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ces/czech_so_me_sentiment_classification.py +1 -2
- mteb/tasks/classification/dan/angry_tweets_classification.py +1 -2
- mteb/tasks/classification/dan/danish_political_comments_classification.py +1 -2
- mteb/tasks/classification/dan/ddisco_cohesion_classification.py +1 -2
- mteb/tasks/classification/dan/dk_hate_classification.py +1 -2
- mteb/tasks/classification/deu/german_politicians_twitter_sentiment_classification.py +1 -2
- mteb/tasks/classification/deu/ten_k_gnad_classification.py +1 -2
- mteb/tasks/classification/eng/amazon_polarity_classification.py +1 -2
- mteb/tasks/classification/eng/arxiv_classification.py +1 -2
- mteb/tasks/classification/eng/banking77_classification.py +1 -2
- mteb/tasks/classification/eng/dbpedia_classification.py +1 -2
- mteb/tasks/classification/eng/emotion_classification.py +1 -2
- mteb/tasks/classification/eng/financial_phrasebank_classification.py +1 -2
- mteb/tasks/classification/eng/frenk_en_classification.py +1 -2
- mteb/tasks/classification/eng/gtsrb_classification.py +1 -1
- mteb/tasks/classification/eng/imdb_classification.py +1 -2
- mteb/tasks/classification/eng/legal_bench_classification.py +14 -120
- mteb/tasks/classification/eng/news_classification.py +1 -2
- mteb/tasks/classification/eng/patch_camelyon_classification.py +1 -1
- mteb/tasks/classification/eng/patent_classification.py +1 -2
- mteb/tasks/classification/eng/poem_sentiment_classification.py +1 -2
- mteb/tasks/classification/eng/sds_eye_protection_classification.py +1 -2
- mteb/tasks/classification/eng/sds_gloves_classification.py +1 -2
- mteb/tasks/classification/eng/toxic_chat_classification.py +2 -19
- mteb/tasks/classification/eng/toxic_conversations_classification.py +1 -2
- mteb/tasks/classification/eng/tweet_sentiment_extraction_classification.py +1 -2
- mteb/tasks/classification/eng/tweet_topic_single_classification.py +2 -13
- mteb/tasks/classification/eng/ucf101_classification.py +1 -5
- mteb/tasks/classification/eng/wikipedia_bio_met_chem_classification.py +1 -2
- mteb/tasks/classification/eng/wikipedia_chem_fields_classification.py +1 -2
- mteb/tasks/classification/eng/wikipedia_comp_chem_spectroscopy_classification.py +1 -2
- mteb/tasks/classification/eng/wikipedia_crystallography_analytical_classification.py +1 -2
- mteb/tasks/classification/eng/wikipedia_theoretical_applied_classification.py +1 -2
- mteb/tasks/classification/eng/yahoo_answers_topics_classification.py +1 -2
- mteb/tasks/classification/eng/yelp_review_full_classification.py +1 -2
- mteb/tasks/classification/est/estonian_valence.py +1 -2
- mteb/tasks/classification/fas/fa_mteb_classification.py +7 -14
- mteb/tasks/classification/fil/filipino_hate_speech_classification.py +1 -2
- mteb/tasks/classification/fin/fin_toxicity_classification.py +2 -11
- mteb/tasks/classification/fra/french_book_reviews.py +1 -2
- mteb/tasks/classification/fra/movie_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/guj/gujarati_news_classification.py +1 -2
- mteb/tasks/classification/heb/__init__.py +6 -1
- mteb/tasks/classification/heb/hebrew_sentiment_analysis.py +62 -4
- mteb/tasks/classification/hin/hindi_discourse_classification.py +1 -2
- mteb/tasks/classification/hin/sentiment_analysis_hindi.py +1 -2
- mteb/tasks/classification/hrv/frenk_hr_classification.py +1 -2
- mteb/tasks/classification/ind/indonesian_id_clickbait_classification.py +1 -2
- mteb/tasks/classification/ind/indonesian_mongabay_conservation_classification.py +1 -2
- mteb/tasks/classification/ita/italian_linguist_acceptability_classification.py +1 -2
- mteb/tasks/classification/jav/javanese_imdb_classification.py +1 -2
- mteb/tasks/classification/jpn/wrime_classification.py +1 -2
- mteb/tasks/classification/kan/kannada_news_classification.py +1 -2
- mteb/tasks/classification/kor/klue_tc.py +1 -2
- mteb/tasks/classification/kor/kor_hate_classification.py +2 -17
- mteb/tasks/classification/kor/kor_sarcasm_classification.py +2 -19
- mteb/tasks/classification/kur/kurdish_sentiment_classification.py +1 -2
- mteb/tasks/classification/mal/malayalam_news_classification.py +1 -2
- mteb/tasks/classification/mar/marathi_news_classification.py +1 -2
- mteb/tasks/classification/mkd/macedonian_tweet_sentiment_classification.py +1 -2
- mteb/tasks/classification/multilingual/catalonia_tweet_classification.py +1 -6
- mteb/tasks/classification/multilingual/multi_hate_classification.py +1 -4
- mteb/tasks/classification/multilingual/ru_sci_bench_classification.py +4 -23
- mteb/tasks/classification/multilingual/scala_classification.py +1 -2
- mteb/tasks/classification/multilingual/sib200_classification.py +1 -6
- mteb/tasks/classification/mya/myanmar_news.py +1 -2
- mteb/tasks/classification/nep/nepali_news_classification.py +1 -2
- mteb/tasks/classification/nld/dutch_book_review_sentiment_classification.py +4 -2
- mteb/tasks/classification/nld/dutch_cola_classification.py +3 -0
- mteb/tasks/classification/nld/dutch_government_bias_classification.py +3 -0
- mteb/tasks/classification/nld/dutch_news_articles_classification.py +3 -0
- mteb/tasks/classification/nld/dutch_sarcastic_headlines_classification.py +3 -0
- mteb/tasks/classification/nld/iconclass_classification.py +3 -0
- mteb/tasks/classification/nld/open_tender_classification.py +3 -0
- mteb/tasks/classification/nld/vaccin_chat_nl_classification.py +3 -0
- mteb/tasks/classification/nob/no_rec_classification.py +1 -2
- mteb/tasks/classification/nob/norwegian_parliament_classification.py +1 -2
- mteb/tasks/classification/ory/odia_news_classification.py +1 -2
- mteb/tasks/classification/pol/polish_classification.py +3 -6
- mteb/tasks/classification/ron/moroco.py +1 -2
- mteb/tasks/classification/ron/romanian_reviews_sentiment.py +1 -2
- mteb/tasks/classification/ron/romanian_sentiment_classification.py +1 -2
- mteb/tasks/classification/rus/georeview_classification.py +1 -2
- mteb/tasks/classification/rus/headline_classification.py +1 -2
- mteb/tasks/classification/rus/inappropriateness_classification.py +1 -2
- mteb/tasks/classification/rus/ru_reviews_classification.py +1 -2
- mteb/tasks/classification/rus/ru_toixic_classification_okmlcup.py +1 -2
- mteb/tasks/classification/rus/senti_ru_eval.py +1 -2
- mteb/tasks/classification/sin/sinhala_news_classification.py +1 -2
- mteb/tasks/classification/sin/sinhala_news_source_classification.py +1 -2
- mteb/tasks/classification/slk/csfdsk_movie_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/slk/slovak_hate_speech_classification.py +1 -2
- mteb/tasks/classification/slk/slovak_movie_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/slv/frenk_sl_classification.py +1 -2
- mteb/tasks/classification/spa/spanish_news_classification.py +1 -2
- mteb/tasks/classification/spa/spanish_sentiment_classification.py +1 -2
- mteb/tasks/classification/ssw/siswati_news_classification.py +1 -2
- mteb/tasks/classification/swa/swahili_news_classification.py +1 -2
- mteb/tasks/classification/swe/dalaj_classification.py +1 -2
- mteb/tasks/classification/swe/swe_rec_classification.py +1 -2
- mteb/tasks/classification/swe/swedish_sentiment_classification.py +1 -2
- mteb/tasks/classification/tam/tamil_news_classification.py +1 -2
- mteb/tasks/classification/tel/telugu_andhra_jyoti_news_classification.py +1 -2
- mteb/tasks/classification/tha/wisesight_sentiment_classification.py +1 -2
- mteb/tasks/classification/tsn/tswana_news_classification.py +1 -2
- mteb/tasks/classification/tur/__init__.py +4 -0
- mteb/tasks/classification/tur/turkish_constitutional_court.py +41 -0
- mteb/tasks/classification/tur/turkish_movie_sentiment_classification.py +1 -2
- mteb/tasks/classification/tur/turkish_product_sentiment_classification.py +1 -2
- mteb/tasks/classification/ukr/ukr_formality_classification.py +2 -15
- mteb/tasks/classification/urd/urdu_roman_sentiment_classification.py +1 -2
- mteb/tasks/classification/vie/amazon_counterfactual_vn_classification.py +1 -6
- mteb/tasks/classification/vie/amazon_polarity_vn_classification.py +1 -6
- mteb/tasks/classification/vie/amazon_reviews_vn_classification.py +1 -5
- mteb/tasks/classification/vie/banking77_vn_classification.py +1 -5
- mteb/tasks/classification/vie/emotion_vn_classification.py +1 -5
- mteb/tasks/classification/vie/imdb_vn_classification.py +1 -5
- mteb/tasks/classification/vie/massive_intent_vn_classification.py +1 -5
- mteb/tasks/classification/vie/massive_scenario_vn_classification.py +1 -5
- mteb/tasks/classification/vie/mtop_domain_vn_classification.py +1 -5
- mteb/tasks/classification/vie/mtop_intent_vn_classification.py +1 -5
- mteb/tasks/classification/vie/toxic_conversations_vn_classification.py +1 -5
- mteb/tasks/classification/vie/tweet_sentiment_extraction_vn_classification.py +1 -5
- mteb/tasks/classification/vie/vie_student_feedback_classification.py +1 -2
- mteb/tasks/classification/zho/cmteb_classification.py +5 -10
- mteb/tasks/classification/zho/yue_openrice_review_classification.py +1 -2
- mteb/tasks/classification/zul/isi_zulu_news_classification.py +1 -2
- mteb/tasks/clustering/jpn/mews_c16_ja_clustering.py +1 -3
- mteb/tasks/clustering/multilingual/sib200_clustering_s2s.py +1 -6
- mteb/tasks/clustering/nld/dutch_news_articles_clustering_p2p.py +3 -0
- mteb/tasks/clustering/nld/dutch_news_articles_clustering_s2s.py +3 -0
- mteb/tasks/clustering/nld/iconclass_clustering_s2s.py +3 -0
- mteb/tasks/clustering/nld/open_tender_clustering_p2p.py +3 -0
- mteb/tasks/clustering/nld/open_tender_clustering_s2s.py +3 -0
- mteb/tasks/clustering/nld/vabb_clustering_p2p.py +3 -0
- mteb/tasks/clustering/nld/vabb_clustering_s2s.py +3 -0
- mteb/tasks/clustering/vie/reddit_clustering_p2p_vn.py +1 -5
- mteb/tasks/clustering/vie/reddit_clustering_vn.py +1 -5
- mteb/tasks/clustering/vie/stack_exchange_clustering_p2p_vn.py +1 -5
- mteb/tasks/clustering/vie/stack_exchange_clustering_vn.py +1 -5
- mteb/tasks/clustering/vie/twenty_newsgroups_clustering_vn.py +1 -5
- mteb/tasks/multilabel_classification/ita/emit_classification.py +1 -5
- mteb/tasks/multilabel_classification/kor/kor_hate_speech_ml_classification.py +1 -9
- mteb/tasks/multilabel_classification/mlt/maltese_news_classification.py +1 -6
- mteb/tasks/multilabel_classification/nld/covid_disinformation_nl_multi_label_classification.py +3 -0
- mteb/tasks/multilabel_classification/nld/vabb_multi_label_classification.py +3 -0
- mteb/tasks/multilabel_classification/por/brazilian_toxic_tweets_classification.py +1 -6
- mteb/tasks/multilabel_classification/swe/swedish_patent_cpc_group_classification.py +1 -1
- mteb/tasks/multilabel_classification/swe/swedish_patent_cpc_subclass_classification.py +1 -2
- mteb/tasks/pair_classification/dan/talemaader_pc.py +1 -6
- mteb/tasks/pair_classification/eng/legal_bench_pc.py +1 -9
- mteb/tasks/pair_classification/nld/sick_nl_pair_classification.py +3 -0
- mteb/tasks/pair_classification/nld/xlwic_nl_pair_classification.py +3 -0
- mteb/tasks/pair_classification/rus/__init__.py +2 -2
- mteb/tasks/pair_classification/rus/terra.py +51 -25
- mteb/tasks/pair_classification/vie/sprint_duplicate_questions_pcvn.py +1 -5
- mteb/tasks/pair_classification/vie/twitter_sem_eval2015_pcvn.py +1 -5
- mteb/tasks/pair_classification/vie/twitter_url_corpus_pcvn.py +1 -5
- mteb/tasks/regression/multilingual/ru_sci_bench_regression.py +2 -6
- mteb/tasks/reranking/jpn/__init__.py +9 -1
- mteb/tasks/reranking/jpn/j_qa_ra_reranking_lite.py +49 -0
- mteb/tasks/reranking/jpn/ja_cwir_reranking_lite.py +47 -0
- mteb/tasks/reranking/multilingual/__init__.py +2 -0
- mteb/tasks/reranking/multilingual/multi_long_doc_reranking.py +70 -0
- mteb/tasks/reranking/multilingual/x_glue_wpr_reranking.py +1 -2
- mteb/tasks/reranking/vie/ask_ubuntu_dup_questions_vn.py +1 -5
- mteb/tasks/reranking/vie/sci_docs_reranking_vn.py +1 -5
- mteb/tasks/reranking/vie/stack_overflow_dup_questions_vn.py +1 -5
- mteb/tasks/retrieval/code/fresh_stack_retrieval.py +8 -5
- mteb/tasks/retrieval/eng/lit_search_retrieval.py +1 -8
- mteb/tasks/retrieval/eng/vidore_bench_retrieval.py +4 -0
- mteb/tasks/retrieval/jpn/__init__.py +8 -0
- mteb/tasks/retrieval/jpn/ja_cwir_retrieval.py +1 -4
- mteb/tasks/retrieval/jpn/ja_cwir_retrieval_lite.py +47 -0
- mteb/tasks/retrieval/jpn/jaqket_retrieval_lite.py +50 -0
- mteb/tasks/retrieval/jpn/miracl_ja_retrieval_lite.py +52 -0
- mteb/tasks/retrieval/jpn/mr_tydi_ja_retrieval_lite.py +48 -0
- mteb/tasks/retrieval/kat/georgian_faq_retrieval.py +11 -4
- mteb/tasks/retrieval/kor/__init__.py +2 -1
- mteb/tasks/retrieval/kor/squad_kor_v1_retrieval.py +47 -0
- mteb/tasks/retrieval/multilingual/__init__.py +22 -0
- mteb/tasks/retrieval/multilingual/belebele_retrieval.py +5 -4
- mteb/tasks/retrieval/multilingual/jina_vdr_bench_retrieval.py +56 -42
- mteb/tasks/retrieval/multilingual/mkqa_retrieval.py +1 -2
- mteb/tasks/retrieval/multilingual/mlqa_retrieval.py +1 -4
- mteb/tasks/retrieval/multilingual/multi_long_doc_retrieval.py +1 -2
- mteb/tasks/retrieval/multilingual/public_health_qa_retrieval.py +9 -4
- mteb/tasks/retrieval/multilingual/ru_sci_bench_retrieval.py +2 -12
- mteb/tasks/retrieval/multilingual/vidore2_bench_retrieval.py +4 -2
- mteb/tasks/retrieval/multilingual/vidore3_bench_retrieval.py +399 -0
- mteb/tasks/retrieval/nld/__init__.py +8 -4
- mteb/tasks/retrieval/nld/argu_ana_nl_retrieval.py +46 -27
- mteb/tasks/retrieval/nld/bbsard_nl_retrieval.py +3 -0
- mteb/tasks/retrieval/nld/dutch_news_articles_retrieval.py +3 -0
- mteb/tasks/retrieval/nld/legal_qa_nl_retrieval.py +3 -0
- mteb/tasks/retrieval/nld/nf_corpus_nl_retrieval.py +42 -25
- mteb/tasks/retrieval/nld/open_tender_retrieval.py +3 -0
- mteb/tasks/retrieval/nld/sci_fact_nl_retrieval.py +42 -24
- mteb/tasks/retrieval/nld/scidocsnl_retrieval.py +44 -27
- mteb/tasks/retrieval/nld/vabb_retrieval.py +3 -0
- mteb/tasks/retrieval/slk/slovak_sum_retrieval.py +1 -7
- mteb/tasks/retrieval/vie/argu_ana_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/climate_fevervn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_android_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_gis_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_mathematica_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_physics_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_programmers_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_stats_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_tex_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_unix_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_webmasters_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_wordpress_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/db_pedia_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/fevervn_retrieval.py +1 -7
- mteb/tasks/retrieval/vie/fi_qa2018_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/green_node_table_markdown_retrieval.py +16 -1
- mteb/tasks/retrieval/vie/hotpot_qavn_retrieval.py +1 -6
- mteb/tasks/retrieval/vie/msmarcovn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/nf_corpus_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/nqvn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/quora_vn_retrieval.py +1 -6
- mteb/tasks/retrieval/vie/sci_fact_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/scidocsvn_retrieval.py +1 -6
- mteb/tasks/retrieval/vie/touche2020_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/treccovidvn_retrieval.py +1 -5
- mteb/tasks/sts/nld/sick_nl_sts.py +1 -0
- mteb/tasks/sts/vie/biosses_stsvn.py +1 -5
- mteb/tasks/sts/vie/sickr_stsvn.py +1 -5
- mteb/tasks/sts/vie/sts_benchmark_stsvn.py +1 -5
- mteb/tasks/zeroshot_classification/eng/gtsrb.py +1 -1
- mteb/tasks/zeroshot_classification/eng/patch_camelyon.py +1 -1
- mteb/tasks/zeroshot_classification/eng/ucf101.py +1 -5
- mteb/types/_encoder_io.py +7 -2
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/METADATA +11 -5
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/RECORD +457 -391
- mteb/models/model_implementations/nb_sbert.py +0 -25
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/WHEEL +0 -0
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/entry_points.txt +0 -0
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/licenses/LICENSE +0 -0
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/top_level.txt +0 -0
|
@@ -247,6 +247,40 @@ kalm_v2_training_data = {
|
|
|
247
247
|
}
|
|
248
248
|
|
|
249
249
|
|
|
250
|
+
KaLM_Embedding_gemma_3_12b_training_data = [
|
|
251
|
+
"BornholmBitextMining",
|
|
252
|
+
"BibleNLPBitextMining", # train[256:] to avoid the test set in MTEB
|
|
253
|
+
"FinParaSTS",
|
|
254
|
+
"SemRel24STS",
|
|
255
|
+
"STSB",
|
|
256
|
+
"GermanSTSBenchmark",
|
|
257
|
+
"TERRa",
|
|
258
|
+
"StatcanDialogueDatasetRetrieval",
|
|
259
|
+
"SwissJudgementClassification",
|
|
260
|
+
"GreekLegalCodeClassification",
|
|
261
|
+
"CataloniaTweetClassification",
|
|
262
|
+
"NordicLangClassification",
|
|
263
|
+
"CyrillicTurkicLangClassification",
|
|
264
|
+
"PoemSentimentClassification",
|
|
265
|
+
"CzechProductReviewSentimentClassification",
|
|
266
|
+
"DBpediaClassification",
|
|
267
|
+
"IndicLangClassification",
|
|
268
|
+
"NusaParagraphEmotionClassification",
|
|
269
|
+
"OdiaNewsClassification",
|
|
270
|
+
"ScalaClassification",
|
|
271
|
+
"ItaCaseholdClassification",
|
|
272
|
+
"CSFDSKMovieReviewSentimentClassification",
|
|
273
|
+
"ToxicConversationsClassification",
|
|
274
|
+
"PAC",
|
|
275
|
+
"SpartQA",
|
|
276
|
+
"WinoGrande",
|
|
277
|
+
"Quail",
|
|
278
|
+
"HellaSwag",
|
|
279
|
+
"AlphaNLI",
|
|
280
|
+
]
|
|
281
|
+
KaLM_Embedding_gemma_3_12b_training_data += kalm_training_data
|
|
282
|
+
|
|
283
|
+
|
|
250
284
|
KaLM_task_prompts = {
|
|
251
285
|
"AmazonCounterfactualClassification": "Given an Amazon review, judge whether it is counterfactual.",
|
|
252
286
|
"AmazonPolarityClassification": "Classifying Amazon reviews into positive or negative sentiment",
|
|
@@ -450,13 +484,7 @@ KaLM_v2_task_prompts = {
|
|
|
450
484
|
"SummEval-document": "Retrieve semantically similar summaries",
|
|
451
485
|
}
|
|
452
486
|
|
|
453
|
-
|
|
454
|
-
"Classification": "classify the query into different classes.",
|
|
455
|
-
"MultilabelClassification": "Instruct: classify the query into different classes.",
|
|
456
|
-
"Clustering": "classify the query into different classes.",
|
|
457
|
-
"Reranking-query": "Given a query, retrieve documents that answer the query.",
|
|
458
|
-
"Retrieval-query": "Given a query, retrieve documents that answer the query.",
|
|
459
|
-
"InstructionRetrieval-query": "Given a query, retrieve documents that answer the query.",
|
|
487
|
+
KaLM_Embedding_gemma_3_12b_task_prompts = {
|
|
460
488
|
"AmazonCounterfactualClassification": "Classify a given Amazon customer review text as either counterfactual or not-counterfactual",
|
|
461
489
|
"AmazonPolarityClassification": "Classify Amazon reviews into positive or negative sentiment",
|
|
462
490
|
"AmazonReviewsClassification": "Classify the given Amazon review into its appropriate rating category",
|
|
@@ -478,26 +506,38 @@ KaLM_X_task_prompts = {
|
|
|
478
506
|
"ArxivClusteringP2P": "Identify the main and secondary category of Arxiv papers based on the titles and abstracts",
|
|
479
507
|
"ArxivClusteringS2S": "Identify the main and secondary category of Arxiv papers based on the titles",
|
|
480
508
|
"BiorxivClusteringP2P": "Identify the main category of Biorxiv papers based on the titles and abstracts",
|
|
509
|
+
"BiorxivClusteringP2P.v2": "Identify the main category of Biorxiv papers based on the titles and abstracts",
|
|
481
510
|
"BiorxivClusteringS2S": "Identify the main category of Biorxiv papers based on the titles",
|
|
511
|
+
"BiorxivClusteringS2S.v2": "Identify the main category of Biorxiv papers based on the titles",
|
|
482
512
|
"MedrxivClusteringP2P": "Identify the main category of Medrxiv papers based on the titles and abstracts",
|
|
513
|
+
"MedrxivClusteringP2P.v2": "Identify the main category of Medrxiv papers based on the titles and abstracts",
|
|
483
514
|
"MedrxivClusteringS2S": "Identify the main category of Medrxiv papers based on the titles",
|
|
515
|
+
"MedrxivClusteringS2S.v2": "Identify the main category of Medrxiv papers based on the titles",
|
|
484
516
|
"RedditClustering": "Identify the topic or theme of Reddit posts based on the titles",
|
|
485
517
|
"RedditClusteringP2P": "Identify the topic or theme of Reddit posts based on the titles and posts",
|
|
486
518
|
"StackExchangeClustering": "Identify the topic or theme of StackExchange posts based on the titles",
|
|
519
|
+
"StackExchangeClustering.v2": "Identify the topic or theme of StackExchange posts based on the titles",
|
|
487
520
|
"StackExchangeClusteringP2P": "Identify the topic or theme of StackExchange posts based on the given paragraphs",
|
|
521
|
+
"StackExchangeClusteringP2P.v2": "Identify the topic or theme of StackExchange posts based on the given paragraphs",
|
|
488
522
|
"TwentyNewsgroupsClustering": "Identify the topic or theme of the given news articles",
|
|
489
523
|
"CLSClusteringS2S": "Identify the main category of scholar papers based on the titles",
|
|
490
524
|
"CLSClusteringP2P": "Identify the main category of scholar papers based on the titles and abstracts",
|
|
525
|
+
"CLSClusteringP2P.v2": "Identify the main category of scholar papers based on the titles and abstracts",
|
|
491
526
|
"ThuNewsClusteringS2S": "Identify the topic or theme of the given news articles based on the titles",
|
|
492
527
|
"ThuNewsClusteringP2P": "Identify the topic or theme of the given news articles based on the titles and contents",
|
|
493
528
|
"AskUbuntuDupQuestions-query": "Retrieve duplicate questions from AskUbuntu forum",
|
|
494
529
|
"MindSmallReranking-query": "Retrieve relevant news articles based on user browsing history",
|
|
495
530
|
"SciDocsRR-query": "Given a title of a scientific paper, retrieve the titles of other relevant papers",
|
|
496
531
|
"StackOverflowDupQuestions-query": "Retrieve duplicate questions from StackOverflow forum",
|
|
532
|
+
"SprintDuplicateQuestions": "Retrieve semantically duplicate questions",
|
|
533
|
+
"TwitterSemEval2015": "Retrieve tweets that are semantically similar to the given tweet",
|
|
534
|
+
"TwitterURLCorpus": "Retrieve tweets that are semantically similar to the given tweet",
|
|
497
535
|
"T2Reranking-query": "Given a Chinese search query, retrieve web passages that answer the question",
|
|
498
536
|
"MMarcoReranking-query": "Given a Chinese search query, retrieve web passages that answer the question",
|
|
499
537
|
"CMedQAv1-reranking-query": "Given a Chinese community medical question, retrieve replies that best answer the question",
|
|
500
538
|
"CMedQAv2-reranking-query": "Given a Chinese community medical question, retrieve replies that best answer the question",
|
|
539
|
+
"Ocnli": "Retrieve semantically similar text.",
|
|
540
|
+
"Cmnli": "Retrieve semantically similar text.",
|
|
501
541
|
"ArguAna-query": "Given a claim, find documents that refute the claim",
|
|
502
542
|
"ArguAna-document": "Given a claim, find documents that refute the claim",
|
|
503
543
|
"ClimateFEVER-query": "Given a claim about climate change, retrieve documents that support or refute the claim",
|
|
@@ -512,22 +552,26 @@ KaLM_X_task_prompts = {
|
|
|
512
552
|
"NFCorpus-query": "Given a question, retrieve relevant documents that best answer the question",
|
|
513
553
|
"NQ-query": "Given a question, retrieve Wikipedia passages that answer the question",
|
|
514
554
|
"QuoraRetrieval-query": "Given a question, retrieve questions that are semantically equivalent to the given question",
|
|
515
|
-
"SCIDOCS-query": "Given a scientific paper
|
|
555
|
+
"SCIDOCS-query": "Given a title of a scientific paper, retrieve the titles of other relevant papers",
|
|
516
556
|
"SciFact-query": "Given a scientific claim, retrieve documents that support or refute the claim",
|
|
517
557
|
"Touche2020-query": "Given a question, retrieve detailed and persuasive arguments that answer the question",
|
|
518
558
|
"Touche2020Retrieval.v3-query": "Given a question, retrieve detailed and persuasive arguments that answer the question",
|
|
519
|
-
"TRECCOVID-query": "Given a query
|
|
559
|
+
"TRECCOVID-query": "Given a medical query, retrieve documents that answer the query",
|
|
520
560
|
"T2Retrieval-query": "Given a Chinese search query, retrieve web passages that answer the question",
|
|
521
561
|
"MMarcoRetrieval-query": "Given a web search query, retrieve relevant passages that answer the query",
|
|
562
|
+
"VoyageMMarcoReranking-query": "Given a Japanese search query, retrieve web passages that answer the question",
|
|
522
563
|
"DuRetrieval-query": "Given a Chinese search query, retrieve web passages that answer the question",
|
|
523
564
|
"CovidRetrieval-query": "Given a question on COVID-19, retrieve news articles that answer the question",
|
|
524
565
|
"CmedqaRetrieval-query": "Given a Chinese community medical question, retrieve replies that best answer the question",
|
|
525
566
|
"EcomRetrieval-query": "Given a user query from an e-commerce website, retrieve description sentences of relevant products",
|
|
526
567
|
"MedicalRetrieval-query": "Given a medical question, retrieve user replies that best answer the question",
|
|
527
568
|
"VideoRetrieval-query": "Given a video search query, retrieve the titles of relevant videos",
|
|
569
|
+
"STSBenchmarkMultilingualSTS": "Retrieve semantically similar text",
|
|
570
|
+
"SICKFr": "Retrieve semantically similar text",
|
|
571
|
+
"SummEvalFr": "Given a news summary, retrieve other semantically similar summaries",
|
|
528
572
|
"MasakhaNEWSClassification": "Classify the News in the given texts into one of the seven category: politics,sports,health,business,entertainment,technology,religion ",
|
|
529
|
-
"
|
|
530
|
-
"
|
|
573
|
+
"OpusparcusPC": "Retrieve semantically similar text",
|
|
574
|
+
"PAWSX": "Retrieve semantically similar text",
|
|
531
575
|
"HALClusteringS2S": "Identify the main category of academic passage based on the titles and contents",
|
|
532
576
|
"MasakhaNEWSClusteringP2P": "Identify the topic or theme of the given news articles based on the titles and contents",
|
|
533
577
|
"MasakhaNEWSClusteringS2S": "Identify the topic or theme of the given news articles based on the titles",
|
|
@@ -541,10 +585,23 @@ KaLM_X_task_prompts = {
|
|
|
541
585
|
"XPQARetrieval-query": "Given a question, retrieve passages that answer the question",
|
|
542
586
|
"MintakaRetrieval-query": "Given a question, retrieve passages that answer the question",
|
|
543
587
|
"CBD": "Classify the sentiment of polish tweet reviews",
|
|
544
|
-
"PolEmo2.0-IN": "Classify the sentiment of
|
|
545
|
-
"PolEmo2.0-OUT": "Classify the sentiment of
|
|
588
|
+
"PolEmo2.0-IN": "Classify the sentiment of medicine and hotels online reviews",
|
|
589
|
+
"PolEmo2.0-OUT": "Classify the sentiment of products and school online reviews",
|
|
546
590
|
"AllegroReviews": "Classify the sentiment of reviews from e-commerce marketplace Allegro",
|
|
547
|
-
"PAC": 'Classify
|
|
591
|
+
"PAC": 'Classify Polish contract clauses into one of the following two types: "Safe Contract Clauses" and "Unfair Contract Clauses".',
|
|
592
|
+
"SICK-E-PL": "Retrieve semantically similar text",
|
|
593
|
+
"SICK-R-PL": "Retrieve semantically similar text",
|
|
594
|
+
"STS22": "Retrieve semantically similar text",
|
|
595
|
+
"AFQMC": "Retrieve semantically similar text",
|
|
596
|
+
"BQ": "Retrieve semantically similar text",
|
|
597
|
+
"LCQMC": "Retrieve semantically similar text",
|
|
598
|
+
"QBQTC": "Retrieve semantically similar text",
|
|
599
|
+
"STS12": "Retrieve semantically similar text",
|
|
600
|
+
"PpcPC": "Retrieve semantically similar text",
|
|
601
|
+
"CDSC-E": "Retrieve semantically similar text",
|
|
602
|
+
"BornholmBitextMining": "Retrieve parallel sentences",
|
|
603
|
+
"NorwegianCourtsBitextMining": "Retrieve parallel sentences",
|
|
604
|
+
"PSC": "Retrieve semantically similar text",
|
|
548
605
|
"EightTagsClustering": "Identify of headlines from social media posts in Polish into 8 categories: film, history, food, medicine, motorization, work, sport and technology",
|
|
549
606
|
"ArguAna-PL-query": "Given a claim, find documents that refute the claim",
|
|
550
607
|
"DBPedia-PL-query": "Given a query, retrieve relevant entity descriptions from DBPedia",
|
|
@@ -554,9 +611,9 @@ KaLM_X_task_prompts = {
|
|
|
554
611
|
"NFCorpus-PL-query": "Given a question, retrieve relevant documents that best answer the question",
|
|
555
612
|
"NQ-PL-query": "Given a question, retrieve Wikipedia passages that answer the question",
|
|
556
613
|
"Quora-PL-query": "Given a question, retrieve questions that are semantically equivalent to the given question",
|
|
557
|
-
"SCIDOCS-PL-query": "Given a scientific paper
|
|
614
|
+
"SCIDOCS-PL-query": "Given a title of a scientific paper, retrieve the titles of other relevant papers",
|
|
558
615
|
"SciFact-PL-query": "Given a scientific claim, retrieve documents that support or refute the claim",
|
|
559
|
-
"TRECCOVID-PL-query": "Given a query
|
|
616
|
+
"TRECCOVID-PL-query": "Given a medical query, retrieve documents that answer the query",
|
|
560
617
|
"GeoreviewClassification": "Classify the organization rating based on the reviews",
|
|
561
618
|
"HeadlineClassification": "Classify the topic or theme of the given news headline",
|
|
562
619
|
"InappropriatenessClassification": "Classify the given message as either sensitive topic or not",
|
|
@@ -567,9 +624,12 @@ KaLM_X_task_prompts = {
|
|
|
567
624
|
"GeoreviewClusteringP2P": "Identify the organization category based on the reviews",
|
|
568
625
|
"RuSciBenchGRNTIClusteringP2P": "Identify the category of scientific papers based on the titles and abstracts",
|
|
569
626
|
"RuSciBenchOECDClusteringP2P": "Identify the category of scientific papers based on the titles and abstracts",
|
|
627
|
+
"TERRa": "Given a premise, retrieve a hypothesis that is entailed by the premise",
|
|
570
628
|
"RuBQReranking-query": "Given a question, retrieve Wikipedia passages that answer the question",
|
|
571
629
|
"RiaNewsRetrieval-query": "Given a headline, retrieval relevant articles",
|
|
572
630
|
"RuBQRetrieval-query": "Given a question, retrieve Wikipedia passages that answer the question",
|
|
631
|
+
"RUParaPhraserSTS": "Retrieve semantically similar text",
|
|
632
|
+
"RuSTSBenchmarkSTS": "Retrieve semantically similar text",
|
|
573
633
|
"AppsRetrieval-query": "Given a question about code problem, retrieval code that can solve user's problem",
|
|
574
634
|
"COIRCodeSearchNetRetrieval-query": "Given a code snippet, retrieve the comment corresponding to that code.",
|
|
575
635
|
"CodeEditSearchRetrieval-query": "Given a piece of code, retrieval code that in the ",
|
|
@@ -582,8 +642,19 @@ KaLM_X_task_prompts = {
|
|
|
582
642
|
"CosQA-query": "Given a question about coding, retrieval code or passage that can solve user's question",
|
|
583
643
|
"StackOverflowQA-query": "Given a question about coding, retrieval code or passage that can solve user's question",
|
|
584
644
|
"SyntheticText2SQL-query": "Given a user's question, retrieve SQL queries that are appropriate responses to the question",
|
|
585
|
-
"
|
|
586
|
-
"
|
|
645
|
+
"BibleNLPBitextMining": "Retrieve parallel sentences",
|
|
646
|
+
"BUCC.v2": "Retrieve parallel sentences",
|
|
647
|
+
"DiaBlaBitextMining": "Retrieve parallel sentences",
|
|
648
|
+
"FloresBitextMining": "Retrieve parallel sentences",
|
|
649
|
+
"IN22GenBitextMining": "Retrieve parallel sentences",
|
|
650
|
+
"IndicGenBenchFloresBitextMining": "Retrieve parallel sentences",
|
|
651
|
+
"NollySentiBitextMining": "Retrieve parallel sentences",
|
|
652
|
+
"NTREXBitextMining": "Retrieve parallel sentences",
|
|
653
|
+
"NusaTranslationBitextMining": "Retrieve parallel sentences",
|
|
654
|
+
"NusaXBitextMining": "Retrieve parallel sentences",
|
|
655
|
+
"Tatoeba": "Retrieve parallel sentences",
|
|
656
|
+
"BulgarianStoreReviewSentimentClassfication": "Classify user reviews into positive, negative or mixed sentiment",
|
|
657
|
+
"CzechProductReviewSentimentClassification": "Classify product reviews into positive, neutral, or negative sentiment",
|
|
587
658
|
"GreekLegalCodeClassification": "Given a greek legal text, classify its topic",
|
|
588
659
|
"DBpediaClassification": "Given a Wikipedia articles, categorized it into classes based on its DBpedia ontology",
|
|
589
660
|
"FinancialPhrasebankClassification": "Given financial news, categorized by sentiment into positive, negative, or neutral",
|
|
@@ -611,7 +682,7 @@ KaLM_X_task_prompts = {
|
|
|
611
682
|
"PunjabiNewsClassification": "Given a news article, categorized it into two-classes",
|
|
612
683
|
"SinhalaNewsClassification": "Given a news article, categorized it into political, business, technology, sports and Entertainment",
|
|
613
684
|
"CSFDSKMovieReviewSentimentClassification": "Given a movie review, classify its rating on a scale from 0 to 5",
|
|
614
|
-
"SiswatiNewsClassification": "Given a news article, classify its topic",
|
|
685
|
+
"SiswatiNewsClassification": "Given a news article in Siswati, classify its topic",
|
|
615
686
|
"SlovakMovieReviewSentimentClassification": "Given a movie review, categorized it into positive or negative",
|
|
616
687
|
"SwahiliNewsClassification": "Given a news article, classify its domain",
|
|
617
688
|
"TswanaNewsClassification": "Given a news article, classify its topic",
|
|
@@ -621,7 +692,9 @@ KaLM_X_task_prompts = {
|
|
|
621
692
|
"ArXivHierarchicalClusteringP2P": "Identify the main and secondary category of Arxiv papers based on the titles and abstracts",
|
|
622
693
|
"ArXivHierarchicalClusteringS2S": "Identify the main and secondary category of Arxiv papers based on the titles",
|
|
623
694
|
"BigPatentClustering.v2": "Identify the category of documents from the Big Patent dataset",
|
|
695
|
+
"AlloProfClusteringS2S": "Identify the topic of document titles from Allo Prof dataset",
|
|
624
696
|
"AlloProfClusteringS2S.v2": "Identify the topic of document titles from Allo Prof dataset",
|
|
697
|
+
"AlloProfClusteringP2P": "Identify the topic of document titles and descriptions from Allo Prof dataset",
|
|
625
698
|
"HALClusteringS2S.v2": "Identify the topic of titles from HAL",
|
|
626
699
|
"SIB200ClusteringS2S": "Identify the category of documents",
|
|
627
700
|
"WikiClusteringP2P.v2": "Identify the category of wiki passages",
|
|
@@ -629,26 +702,60 @@ KaLM_X_task_prompts = {
|
|
|
629
702
|
"KorHateSpeechMLClassification": "Given a Korean online news comments, classify its fine-grained hate speech classes",
|
|
630
703
|
"MalteseNewsClassification": "Given a maltese new, classify its topic",
|
|
631
704
|
"MultiEURLEXMultilabelClassification": "Given a text, classify its topic",
|
|
632
|
-
"BrazilianToxicTweetsClassification": "
|
|
705
|
+
"BrazilianToxicTweetsClassification": "Classify the toxic tweets in Brazilian Portuguese into one of the six categories: LGBTQ+phobia, Xenophobia, Obscene, Insult, Misogyny and Racism.",
|
|
706
|
+
"CTKFactsNLI": "Retrieve semantically similar text",
|
|
707
|
+
"indonli": "Retrieve semantically similar text",
|
|
708
|
+
"ArmenianParaphrasePC": "Retrieve semantically similar text",
|
|
709
|
+
"PawsXPairClassification": "Retrieve semantically similar text",
|
|
710
|
+
"RTE3": "Retrieve semantically similar text",
|
|
711
|
+
"XNLI": "Retrieve semantically similar text",
|
|
712
|
+
"GermanSTSBenchmark": "Retrieve semantically similar text",
|
|
713
|
+
"SICK-R": "Retrieve semantically similar text",
|
|
714
|
+
"STS13": "Retrieve semantically similar text",
|
|
715
|
+
"STS14": "Retrieve semantically similar text",
|
|
716
|
+
"STSBenchmark": "Retrieve semantically similar text",
|
|
717
|
+
"FaroeseSTS": "Retrieve semantically similar text",
|
|
718
|
+
"FinParaSTS": "Retrieve semantically similar text",
|
|
719
|
+
"JSICK": "Retrieve semantically similar text",
|
|
720
|
+
"IndicCrosslingualSTS": "Retrieve parallel sentences",
|
|
721
|
+
"SemRel24STS": "Retrieve semantically similar text",
|
|
722
|
+
"STS17": "Retrieve semantically similar text",
|
|
723
|
+
"STS22.v2": "Retrieve semantically similar text",
|
|
724
|
+
"STSES": "Retrieve semantically similar text",
|
|
725
|
+
"STSB": "Retrieve semantically similar text",
|
|
633
726
|
"AILAStatutes-query": "Identifying the most relevant statutes for a given situation",
|
|
634
|
-
"HagridRetrieval-query": "
|
|
635
|
-
"LegalBenchCorporateLobbying-query": "
|
|
727
|
+
"HagridRetrieval-query": "Given an information-seeking question, retrieve the best replies to answer the question",
|
|
728
|
+
"LegalBenchCorporateLobbying-query": "Given a query, retrieve relevant legal bill summaries",
|
|
636
729
|
"LEMBPasskeyRetrieval-query": "Retrieval the relevant passage for the given query",
|
|
637
730
|
"BelebeleRetrieval-query": "Retrieval the relevant passage for the given query",
|
|
638
731
|
"MLQARetrieval-query": "Retrieval the relevant passage for the given query",
|
|
639
732
|
"StatcanDialogueDatasetRetrieval-query": "Retrieval the relevant passage for the given query",
|
|
640
733
|
"WikipediaRetrievalMultilingual-query": "Retrieval the relevant passage for the given query",
|
|
641
|
-
"Core17InstructionRetrieval-query": "Retrieval the relevant passage for the given query",
|
|
642
|
-
"News21InstructionRetrieval-query": "Retrieval the relevant passage for the given query",
|
|
643
|
-
"Robust04InstructionRetrieval-query": "Retrieval the relevant passage for the given query",
|
|
734
|
+
"Core17InstructionRetrieval-query": "Retrieval the relevant passage for the given query with conditions",
|
|
735
|
+
"News21InstructionRetrieval-query": "Retrieval the relevant passage for the given query with conditions",
|
|
736
|
+
"Robust04InstructionRetrieval-query": "Retrieval the relevant passage for the given query with conditions",
|
|
644
737
|
"WebLINXCandidatesReranking-query": "Retrieval the relevant passage for the given query",
|
|
645
738
|
"WikipediaRerankingMultilingual-query": "Retrieval the relevant passage for the given query",
|
|
739
|
+
"STS15": "Retrieve semantically similar text",
|
|
646
740
|
"MIRACLRetrievalHardNegatives-query": "Retrieval relevant passage for the given query",
|
|
741
|
+
"BIOSSES": "Retrieve semantically similar text",
|
|
647
742
|
"CQADupstackRetrieval-query": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
|
|
648
743
|
"CQADupstackGamingRetrieval-query": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
|
|
649
744
|
"CQADupstackGamingRetrieval-document": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
|
|
650
745
|
"CQADupstackUnixRetrieval-query": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
|
|
651
746
|
"CQADupstackUnixRetrieval-document": "Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question",
|
|
747
|
+
"STS16": "Retrieve semantically similar text",
|
|
748
|
+
"SummEval": "Retrieve semantically similar text",
|
|
749
|
+
"ATEC": "Retrieve semantically similar text",
|
|
750
|
+
"ScalaClassification": "Classify passages into correct or correct in Scandinavian Languages based on linguistic acceptability",
|
|
751
|
+
"SpartQA-query": "Given the following spatial reasoning question, retrieve the right answer.",
|
|
752
|
+
"CEDRClassification": "Given a comment as query, classify expressed emotions into joy, sadness, surprise, fear, and anger",
|
|
753
|
+
"DalajClassification": "Classify texts based on linguistic acceptability in Swedish",
|
|
754
|
+
"TempReasonL1-query": "Given the following question about time, retrieve the correct answer.",
|
|
755
|
+
"WinoGrande-query": "Given the following sentence, retrieve an appropriate answer to fill in the missing underscored part.",
|
|
756
|
+
"NordicLangClassification": "Classify texts based on language",
|
|
757
|
+
"TwitterHjerneRetrieval-query": "Retrieve answers to questions asked in Danish tweets",
|
|
758
|
+
"SwednClusteringP2P": "Identify news categories in Swedish passages",
|
|
652
759
|
}
|
|
653
760
|
|
|
654
761
|
KaLM_INSTRUCTION = "Instruct: {instruction} \n Query: "
|
|
@@ -662,6 +769,7 @@ HIT_TMG__KaLM_embedding_multilingual_mini_instruct_v1 = ModelMeta(
|
|
|
662
769
|
prompts_dict=KaLM_task_prompts,
|
|
663
770
|
),
|
|
664
771
|
name="HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1",
|
|
772
|
+
model_type=["dense"],
|
|
665
773
|
revision="45e42c89990c40aca042659133fc8b13c28634b5",
|
|
666
774
|
release_date="2024-10-23",
|
|
667
775
|
languages=["eng-Latn", "zho-Hans"],
|
|
@@ -686,6 +794,7 @@ HIT_TMG__KaLM_embedding_multilingual_mini_instruct_v1 = ModelMeta(
|
|
|
686
794
|
HIT_TMG__KaLM_embedding_multilingual_mini_v1 = ModelMeta(
|
|
687
795
|
loader=sentence_transformers_loader,
|
|
688
796
|
name="HIT-TMG/KaLM-embedding-multilingual-mini-v1",
|
|
797
|
+
model_type=["dense"],
|
|
689
798
|
revision="8a82a0cd2b322b91723e252486f7cce6fd8ac9d3",
|
|
690
799
|
release_date="2024-08-27",
|
|
691
800
|
languages=["eng-Latn", "zho-Hans"],
|
|
@@ -716,6 +825,7 @@ HIT_TMG__KaLM_embedding_multilingual_mini_instruct_v1_5 = ModelMeta(
|
|
|
716
825
|
prompts_dict=KaLM_task_prompts,
|
|
717
826
|
),
|
|
718
827
|
name="HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5",
|
|
828
|
+
model_type=["dense"],
|
|
719
829
|
revision="fcff2f8a54e4cd96b7766fef1ee960a43d42bb3c",
|
|
720
830
|
release_date="2024-12-26",
|
|
721
831
|
languages=["eng-Latn", "zho-Hans"],
|
|
@@ -746,6 +856,7 @@ HIT_TMG__KaLM_embedding_multilingual_mini_instruct_v2 = ModelMeta(
|
|
|
746
856
|
prompts_dict=KaLM_v2_task_prompts,
|
|
747
857
|
),
|
|
748
858
|
name="HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v2",
|
|
859
|
+
model_type=["dense"],
|
|
749
860
|
revision="d2a21c232dc712ae8230af56d1027cf21b7864bf",
|
|
750
861
|
release_date="2025-06-25",
|
|
751
862
|
languages=["eng-Latn", "zho-Hans"],
|
|
@@ -776,6 +887,7 @@ KaLM_Embedding_KaLM_embedding_multilingual_mini_instruct_v2_5 = ModelMeta(
|
|
|
776
887
|
prompts_dict=KaLM_v2_task_prompts,
|
|
777
888
|
),
|
|
778
889
|
name="KaLM-Embedding/KaLM-embedding-multilingual-mini-instruct-v2.5",
|
|
890
|
+
model_type=["dense"],
|
|
779
891
|
revision="6a4cfc1084cb459ebd4729b53a8656a61448c720",
|
|
780
892
|
release_date="2025-09-30",
|
|
781
893
|
languages=["eng-Latn", "zho-Hans"],
|
|
@@ -794,4 +906,70 @@ KaLM_Embedding_KaLM_embedding_multilingual_mini_instruct_v2_5 = ModelMeta(
|
|
|
794
906
|
training_datasets=kalm_v2_training_data,
|
|
795
907
|
adapted_from="HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v2",
|
|
796
908
|
superseded_by=None,
|
|
909
|
+
citation="""@misc{zhao2025kalmembeddingv2,
|
|
910
|
+
title={KaLM-Embedding-V2: Superior Training Techniques and Data Inspire A Versatile Embedding Model},
|
|
911
|
+
author={Xinping Zhao and Xinshuo Hu and Zifei Shan and Shouzheng Huang and Yao Zhou and Xin Zhang and Zetian Sun and Zhenyu Liu and Dongfang Li and Xinyuan Wei and Youcheng Pan and Yang Xiang and Meishan Zhang and Haofen Wang and Jun Yu and Baotian Hu and Min Zhang},
|
|
912
|
+
year={2025},
|
|
913
|
+
eprint={2506.20923},
|
|
914
|
+
archivePrefix={arXiv},
|
|
915
|
+
primaryClass={cs.CL},
|
|
916
|
+
url={https://arxiv.org/abs/2506.20923},
|
|
917
|
+
}
|
|
918
|
+
|
|
919
|
+
@misc{hu2025kalmembedding,
|
|
920
|
+
title={KaLM-Embedding: Superior Training Data Brings A Stronger Embedding Model},
|
|
921
|
+
author={Xinshuo Hu and Zifei Shan and Xinping Zhao and Zetian Sun and Zhenyu Liu and Dongfang Li and Shaolin Ye and Xinyuan Wei and Qian Chen and Baotian Hu and Haofen Wang and Jun Yu and Min Zhang},
|
|
922
|
+
year={2025},
|
|
923
|
+
eprint={2501.01028},
|
|
924
|
+
archivePrefix={arXiv},
|
|
925
|
+
primaryClass={cs.CL},
|
|
926
|
+
url={https://arxiv.org/abs/2501.01028},
|
|
927
|
+
}""",
|
|
928
|
+
)
|
|
929
|
+
|
|
930
|
+
KaLM_Embedding_gemma_3_12b_2511 = ModelMeta(
|
|
931
|
+
loader=InstructSentenceTransformerModel,
|
|
932
|
+
loader_kwargs=dict(
|
|
933
|
+
instruction_template=KaLM_INSTRUCTION,
|
|
934
|
+
max_seq_length=512,
|
|
935
|
+
apply_instruction_to_passages=True,
|
|
936
|
+
prompts_dict=KaLM_Embedding_gemma_3_12b_task_prompts,
|
|
937
|
+
),
|
|
938
|
+
name="tencent/KaLM-Embedding-Gemma3-12B-2511",
|
|
939
|
+
model_type=["dense"],
|
|
940
|
+
revision="edf22f4753f58b05e3f5495818d31f12db63056d",
|
|
941
|
+
languages=None,
|
|
942
|
+
open_weights=True,
|
|
943
|
+
release_date="2025-11-06",
|
|
944
|
+
n_parameters=11.76 * 1e9,
|
|
945
|
+
memory_usage_mb=44884,
|
|
946
|
+
max_tokens=32768,
|
|
947
|
+
embed_dim=3840,
|
|
948
|
+
license=None,
|
|
949
|
+
reference="https://kalm-embedding.github.io/",
|
|
950
|
+
similarity_fn_name="cosine",
|
|
951
|
+
framework=["Sentence Transformers", "PyTorch"],
|
|
952
|
+
use_instructions=True,
|
|
953
|
+
public_training_code="https://github.com/HITsz-TMG/KaLM-Embedding",
|
|
954
|
+
public_training_data=None,
|
|
955
|
+
training_datasets=KaLM_Embedding_gemma_3_12b_training_data,
|
|
956
|
+
citation="""@misc{zhao2025kalmembeddingv2,
|
|
957
|
+
title={KaLM-Embedding-V2: Superior Training Techniques and Data Inspire A Versatile Embedding Model},
|
|
958
|
+
author={Xinping Zhao and Xinshuo Hu and Zifei Shan and Shouzheng Huang and Yao Zhou and Xin Zhang and Zetian Sun and Zhenyu Liu and Dongfang Li and Xinyuan Wei and Youcheng Pan and Yang Xiang and Meishan Zhang and Haofen Wang and Jun Yu and Baotian Hu and Min Zhang},
|
|
959
|
+
year={2025},
|
|
960
|
+
eprint={2506.20923},
|
|
961
|
+
archivePrefix={arXiv},
|
|
962
|
+
primaryClass={cs.CL},
|
|
963
|
+
url={https://arxiv.org/abs/2506.20923},
|
|
964
|
+
}
|
|
965
|
+
|
|
966
|
+
@misc{hu2025kalmembedding,
|
|
967
|
+
title={KaLM-Embedding: Superior Training Data Brings A Stronger Embedding Model},
|
|
968
|
+
author={Xinshuo Hu and Zifei Shan and Xinping Zhao and Zetian Sun and Zhenyu Liu and Dongfang Li and Shaolin Ye and Xinyuan Wei and Qian Chen and Baotian Hu and Haofen Wang and Jun Yu and Min Zhang},
|
|
969
|
+
year={2025},
|
|
970
|
+
eprint={2501.01028},
|
|
971
|
+
archivePrefix={arXiv},
|
|
972
|
+
primaryClass={cs.CL},
|
|
973
|
+
url={https://arxiv.org/abs/2501.01028},
|
|
974
|
+
}""",
|
|
797
975
|
)
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
from mteb.models import sentence_transformers_loader
|
|
2
|
+
from mteb.models.model_meta import ModelMeta, ScoringFunction
|
|
3
|
+
|
|
4
|
+
sbert_swedish = ModelMeta(
|
|
5
|
+
loader=sentence_transformers_loader, # type: ignore[arg-type]
|
|
6
|
+
name="KBLab/sentence-bert-swedish-cased",
|
|
7
|
+
model_type=["dense"],
|
|
8
|
+
languages=["swe-Latn"],
|
|
9
|
+
open_weights=True,
|
|
10
|
+
revision="6b5e83cd29c03729cfdc33d13b1423399b0efb5c",
|
|
11
|
+
release_date="2023-01-11",
|
|
12
|
+
n_parameters=124690944,
|
|
13
|
+
memory_usage_mb=476,
|
|
14
|
+
embed_dim=768,
|
|
15
|
+
license="apache-2.0",
|
|
16
|
+
max_tokens=384,
|
|
17
|
+
reference="https://huggingface.co/KBLab/sentence-bert-swedish-cased",
|
|
18
|
+
similarity_fn_name=ScoringFunction.COSINE,
|
|
19
|
+
framework=["Sentence Transformers", "PyTorch"],
|
|
20
|
+
use_instructions=False,
|
|
21
|
+
public_training_code=None,
|
|
22
|
+
public_training_data=None,
|
|
23
|
+
training_datasets=None,
|
|
24
|
+
adapted_from="sentence-transformers/all-mpnet-base-v2",
|
|
25
|
+
citation="""@misc{rekathati2021introducing,
|
|
26
|
+
author = {Rekathati, Faton},
|
|
27
|
+
title = {The KBLab Blog: Introducing a Swedish Sentence Transformer},
|
|
28
|
+
url = {https://kb-labb.github.io/posts/2021-08-23-a-swedish-sentence-transformer/},
|
|
29
|
+
year = {2021}
|
|
30
|
+
}""",
|
|
31
|
+
)
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
from mteb.models.model_meta import ModelMeta, ScoringFunction
|
|
2
|
+
from mteb.models.sentence_transformer_wrapper import (
|
|
3
|
+
sentence_transformers_loader,
|
|
4
|
+
)
|
|
5
|
+
|
|
6
|
+
dfm_enc_large = ModelMeta(
|
|
7
|
+
loader=sentence_transformers_loader, # type: ignore
|
|
8
|
+
name="KennethEnevoldsen/dfm-sentence-encoder-large",
|
|
9
|
+
model_type=["dense"],
|
|
10
|
+
languages=["dan-Latn"],
|
|
11
|
+
open_weights=True,
|
|
12
|
+
revision="132c53391e7a780dc6a2f9a03724d0158fe7122c",
|
|
13
|
+
release_date="2023-07-12",
|
|
14
|
+
n_parameters=355087360,
|
|
15
|
+
memory_usage_mb=1554,
|
|
16
|
+
embed_dim=1024,
|
|
17
|
+
license="mit",
|
|
18
|
+
max_tokens=512,
|
|
19
|
+
reference="https://huggingface.co/KennethEnevoldsen/dfm-sentence-encoder-large",
|
|
20
|
+
similarity_fn_name=ScoringFunction.COSINE,
|
|
21
|
+
framework=["Sentence Transformers", "PyTorch"],
|
|
22
|
+
use_instructions=False,
|
|
23
|
+
superseded_by=None,
|
|
24
|
+
adapted_from="chcaa/dfm-encoder-large-v1",
|
|
25
|
+
training_datasets=set(), # just contrastive pre-training
|
|
26
|
+
public_training_code="https://huggingface.co/KennethEnevoldsen/dfm-sentence-encoder-large#hyperparameters",
|
|
27
|
+
citation="""@article{enevoldsenScandinavianEmbeddingBenchmarks2024,
|
|
28
|
+
title = {The {Scandinavian} {Embedding} {Benchmarks}: {Comprehensive} {Assessment} of {Multilingual} and {Monolingual} {Text} {Embedding}},
|
|
29
|
+
shorttitle = {The {Scandinavian} {Embedding} {Benchmarks}},
|
|
30
|
+
url = {https://openreview.net/forum?id=pJl_i7HIA72},
|
|
31
|
+
language = {en},
|
|
32
|
+
urldate = {2024-04-12},
|
|
33
|
+
author = {Enevoldsen, Kenneth and Kardos, Márton and Muennighoff, Niklas and Nielbo, Kristoffer},
|
|
34
|
+
month = feb,
|
|
35
|
+
year = {2024},
|
|
36
|
+
}
|
|
37
|
+
""",
|
|
38
|
+
public_training_data="https://huggingface.co/datasets/danish-foundation-models/danish-gigaword", # paragraphs extracted from Danish Gigaword
|
|
39
|
+
)
|
|
40
|
+
|
|
41
|
+
dfm_enc_med = ModelMeta(
|
|
42
|
+
loader=sentence_transformers_loader, # type: ignore
|
|
43
|
+
name="KennethEnevoldsen/dfm-sentence-encoder-medium",
|
|
44
|
+
model_type=["dense"],
|
|
45
|
+
languages=["dan-Latn"],
|
|
46
|
+
open_weights=True,
|
|
47
|
+
revision="701bce95d499fa97610d57e8823c54fd1fb79930",
|
|
48
|
+
release_date="2023-07-12",
|
|
49
|
+
n_parameters=124445952,
|
|
50
|
+
memory_usage_mb=475,
|
|
51
|
+
embed_dim=768,
|
|
52
|
+
license="mit",
|
|
53
|
+
max_tokens=512,
|
|
54
|
+
reference="https://huggingface.co/KennethEnevoldsen/dfm-sentence-encoder-medium",
|
|
55
|
+
similarity_fn_name=ScoringFunction.COSINE,
|
|
56
|
+
framework=["Sentence Transformers", "PyTorch"],
|
|
57
|
+
use_instructions=False,
|
|
58
|
+
superseded_by=None,
|
|
59
|
+
adapted_from=None,
|
|
60
|
+
public_training_code=None,
|
|
61
|
+
training_datasets=set(), # just contrastive pre-training
|
|
62
|
+
citation="""@article{enevoldsenScandinavianEmbeddingBenchmarks2024,
|
|
63
|
+
title = {The {Scandinavian} {Embedding} {Benchmarks}: {Comprehensive} {Assessment} of {Multilingual} and {Monolingual} {Text} {Embedding}},
|
|
64
|
+
shorttitle = {The {Scandinavian} {Embedding} {Benchmarks}},
|
|
65
|
+
url = {https://openreview.net/forum?id=pJl_i7HIA72},
|
|
66
|
+
language = {en},
|
|
67
|
+
urldate = {2024-04-12},
|
|
68
|
+
author = {Enevoldsen, Kenneth and Kardos, Márton and Muennighoff, Niklas and Nielbo, Kristoffer},
|
|
69
|
+
month = feb,
|
|
70
|
+
year = {2024},
|
|
71
|
+
}
|
|
72
|
+
""",
|
|
73
|
+
public_training_data=None,
|
|
74
|
+
)
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
from mteb.models import sentence_transformers_loader
|
|
2
|
+
from mteb.models.model_meta import ModelMeta, ScoringFunction
|
|
3
|
+
|
|
4
|
+
# Registry metadata for KFST's XLM-RoBERTa sentence encoder covering English
# plus the Scandinavian languages. Derived from `FacebookAI/xlm-roberta-base`
# (see `adapted_from`); no training code/data is published, and the model card
# specifies no license — hence the explicit "not specified" marker.
xlmr_scandi = ModelMeta(
    loader=sentence_transformers_loader,  # type: ignore[arg-type]
    name="KFST/XLMRoberta-en-da-sv-nb",
    model_type=["dense"],
    # Swedish, Norwegian Bokmål, Norwegian Nynorsk, Danish, English.
    languages=["swe-Latn", "nob-Latn", "nno-Latn", "dan-Latn", "eng-Latn"],
    open_weights=True,
    revision="d40c10ca7b1e68b5a8372f2d112dac9eb3279df1",
    release_date="2022-02-22",
    n_parameters=278043648,
    memory_usage_mb=1061,
    embed_dim=768,
    license="not specified",
    max_tokens=512,
    reference="https://huggingface.co/KFST/XLMRoberta-en-da-sv-nb",
    similarity_fn_name=ScoringFunction.COSINE,
    framework=["Sentence Transformers", "PyTorch"],
    use_instructions=False,
    public_training_code=None,
    public_training_data=None,
    # None (unknown), unlike entries that use set() to assert "no overlap".
    training_datasets=None,
    adapted_from="FacebookAI/xlm-roberta-base",
)
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
from mteb.models import ModelMeta, sentence_transformers_loader
|
|
2
|
+
|
|
3
|
+
# Registry metadata for a Bangla sentence transformer fine-tuned (with
# Matryoshka representation learning, per the model name) from
# paraphrase-multilingual-mpnet-base-v2. Training code and data are public
# (see the two URLs below); `training_datasets` is empty because none of the
# training data overlaps with MTEB evaluation sets — TODO confirm against the
# model card if this entry is updated.
kowshik24_bangla_embedding_model = ModelMeta(
    loader=sentence_transformers_loader,
    name="Kowshik24/bangla-sentence-transformer-ft-matryoshka-paraphrase-multilingual-mpnet-base-v2",
    model_type=["dense"],
    languages=["ben-Beng"],  # Bengali using Bengali script
    open_weights=True,
    revision="6689c21e69be5950596bad084457cbaa138728d8",
    release_date="2025-11-10",
    n_parameters=278_000_000,
    memory_usage_mb=1061,
    embed_dim=768,
    license="apache-2.0",
    max_tokens=128,
    reference="https://huggingface.co/Kowshik24/bangla-sentence-transformer-ft-matryoshka-paraphrase-multilingual-mpnet-base-v2",
    # Plain string here (other files use ScoringFunction.COSINE); the model
    # layer accepts both spellings.
    similarity_fn_name="cosine",
    framework=["Sentence Transformers", "PyTorch"],
    use_instructions=False,
    public_training_code="https://github.com/kowshik24/Bangla-Embedding",
    public_training_data="https://huggingface.co/datasets/sartajekram/BanglaRQA",
    training_datasets=set(),
    citation="""@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}""",
)
|
|
@@ -12,6 +12,7 @@ LENS_CITATION = """@article{lei2025lens,
|
|
|
12
12
|
lens_d4000 = ModelMeta(
|
|
13
13
|
loader=None,
|
|
14
14
|
name="yibinlei/LENS-d4000",
|
|
15
|
+
model_type=["dense"],
|
|
15
16
|
languages=None,
|
|
16
17
|
open_weights=True,
|
|
17
18
|
revision="e473b33364e6c48a324796fd1411d3b93670c6fe",
|
|
@@ -34,6 +35,7 @@ lens_d4000 = ModelMeta(
|
|
|
34
35
|
lens_d8000 = ModelMeta(
|
|
35
36
|
loader=None,
|
|
36
37
|
name="yibinlei/LENS-d8000",
|
|
38
|
+
model_type=["dense"],
|
|
37
39
|
languages=None,
|
|
38
40
|
open_weights=True,
|
|
39
41
|
revision="a0b87bd91cb27b6f2f0b0fe22c28026da1d464ef",
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import torch
|
|
2
2
|
|
|
3
|
+
from mteb.models.instruct_wrapper import instruct_wrapper
|
|
3
4
|
from mteb.models.model_meta import ModelMeta, ScoringFunction
|
|
4
|
-
from mteb.models.sentence_transformer_wrapper import SentenceTransformerEncoderWrapper
|
|
5
5
|
from mteb.types import PromptType
|
|
6
6
|
|
|
7
7
|
from .e5_instruct import E5_MISTRAL_TRAINING_DATA
|
|
@@ -22,7 +22,7 @@ def instruction_template(
|
|
|
22
22
|
|
|
23
23
|
|
|
24
24
|
Linq_Embed_Mistral = ModelMeta(
|
|
25
|
-
loader=
|
|
25
|
+
loader=instruct_wrapper,
|
|
26
26
|
loader_kwargs=dict(
|
|
27
27
|
instruction_template=instruction_template,
|
|
28
28
|
attn="cccc",
|
|
@@ -32,6 +32,7 @@ Linq_Embed_Mistral = ModelMeta(
|
|
|
32
32
|
normalized=True,
|
|
33
33
|
),
|
|
34
34
|
name="Linq-AI-Research/Linq-Embed-Mistral",
|
|
35
|
+
model_type=["dense"],
|
|
35
36
|
languages=["eng-Latn"],
|
|
36
37
|
open_weights=True,
|
|
37
38
|
revision="0c1a0b0589177079acc552433cad51d7c9132379",
|
|
@@ -112,6 +112,7 @@ listconranker = ModelMeta(
|
|
|
112
112
|
fp_options="float16",
|
|
113
113
|
),
|
|
114
114
|
name="ByteDance/ListConRanker",
|
|
115
|
+
model_type=["cross-encoder"],
|
|
115
116
|
languages=["zho-Hans"],
|
|
116
117
|
open_weights=True,
|
|
117
118
|
revision="95ae6a5f422a916bc36520f0f3e198e7d91520a0",
|
|
@@ -128,6 +129,5 @@ listconranker = ModelMeta(
|
|
|
128
129
|
use_instructions=False,
|
|
129
130
|
public_training_code=None,
|
|
130
131
|
public_training_data=None,
|
|
131
|
-
is_cross_encoder=True,
|
|
132
132
|
citation=LISTCONRANKER_CITATION,
|
|
133
133
|
)
|