mteb 2.1.4__py3-none-any.whl → 2.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mteb/__init__.py +4 -0
- mteb/_create_dataloaders.py +6 -3
- mteb/_evaluators/any_sts_evaluator.py +21 -12
- mteb/_evaluators/classification_metrics.py +54 -0
- mteb/_evaluators/clustering_evaluator.py +1 -1
- mteb/_evaluators/image/imagetext_pairclassification_evaluator.py +9 -4
- mteb/_evaluators/pair_classification_evaluator.py +30 -38
- mteb/_evaluators/sklearn_evaluator.py +15 -28
- mteb/_evaluators/text/bitext_mining_evaluator.py +4 -1
- mteb/_evaluators/text/summarization_evaluator.py +4 -2
- mteb/_evaluators/zeroshot_classification_evaluator.py +2 -2
- mteb/abstasks/_data_filter/__init__.py +0 -0
- mteb/abstasks/_data_filter/filters.py +125 -0
- mteb/abstasks/_data_filter/task_pipelines.py +102 -0
- mteb/abstasks/_statistics_calculation.py +6 -2
- mteb/abstasks/classification.py +0 -2
- mteb/abstasks/clustering.py +1 -1
- mteb/abstasks/clustering_legacy.py +3 -0
- mteb/abstasks/multilabel_classification.py +10 -3
- mteb/abstasks/pair_classification.py +8 -1
- mteb/abstasks/sts.py +7 -0
- mteb/abstasks/task_metadata.py +1 -0
- mteb/benchmarks/_create_table.py +84 -37
- mteb/benchmarks/benchmark.py +74 -15
- mteb/benchmarks/benchmarks/__init__.py +8 -0
- mteb/benchmarks/benchmarks/benchmarks.py +259 -15
- mteb/benchmarks/get_benchmark.py +2 -0
- mteb/cache.py +47 -10
- mteb/deprecated_evaluator.py +8 -13
- mteb/descriptive_stats/BitextMining/RuSciBenchBitextMining.v2.json +61 -0
- mteb/descriptive_stats/Classification/HebrewSentimentAnalysis.v3.json +60 -0
- mteb/descriptive_stats/Classification/TurkishConstitutionalCourtViolation.json +54 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3ComputerScienceRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3EnergyRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3FinanceEnRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3FinanceFrRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3HrRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3IndustrialRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3NuclearRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3PharmaceuticalsRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3PhysicsRetrieval.json +214 -0
- mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3TelecomRetrieval.json +214 -0
- mteb/descriptive_stats/PairClassification/TERRa.V2.json +35 -0
- mteb/descriptive_stats/Reranking/JQaRARerankingLite.json +35 -0
- mteb/descriptive_stats/Reranking/JaCWIRRerankingLite.json +35 -0
- mteb/descriptive_stats/Reranking/MultiLongDocReranking.json +466 -0
- mteb/descriptive_stats/Retrieval/ArguAna-NL.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/JaCWIRRetrievalLite.json +30 -0
- mteb/descriptive_stats/Retrieval/JaqketRetrievalLite.json +30 -0
- mteb/descriptive_stats/Retrieval/MIRACLJaRetrievalLite.json +30 -0
- mteb/descriptive_stats/Retrieval/MrTyDiJaRetrievalLite.json +30 -0
- mteb/descriptive_stats/Retrieval/NFCorpus-NL.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/SCIDOCS-NL.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/SQuADKorV1Retrieval.json +30 -0
- mteb/descriptive_stats/Retrieval/SciFact-NL.v2.json +30 -0
- mteb/evaluate.py +65 -45
- mteb/leaderboard/app.py +268 -133
- mteb/leaderboard/benchmark_selector.py +14 -5
- mteb/leaderboard/figures.py +13 -15
- mteb/leaderboard/table.py +82 -17
- mteb/models/__init__.py +4 -1
- mteb/models/abs_encoder.py +21 -17
- mteb/models/cache_wrappers/__init__.py +2 -1
- mteb/models/cache_wrappers/cache_backends/_hash_utils.py +2 -2
- mteb/models/cache_wrappers/cache_wrapper.py +1 -1
- mteb/models/get_model_meta.py +3 -114
- mteb/models/instruct_wrapper.py +5 -1
- mteb/models/model_implementations/align_models.py +7 -0
- mteb/models/model_implementations/amazon_models.py +1 -0
- mteb/models/model_implementations/andersborges.py +65 -0
- mteb/models/model_implementations/ara_models.py +8 -0
- mteb/models/model_implementations/arctic_models.py +8 -0
- mteb/models/model_implementations/b1ade_models.py +1 -0
- mteb/models/model_implementations/bedrock_models.py +4 -0
- mteb/models/model_implementations/bge_models.py +60 -0
- mteb/models/model_implementations/bica_model.py +35 -0
- mteb/models/model_implementations/blip2_models.py +11 -0
- mteb/models/model_implementations/blip_models.py +27 -0
- mteb/models/model_implementations/bm25.py +1 -0
- mteb/models/model_implementations/bmretriever_models.py +4 -0
- mteb/models/model_implementations/cadet_models.py +9 -0
- mteb/models/model_implementations/cde_models.py +14 -0
- mteb/models/model_implementations/clip_models.py +3 -0
- mteb/models/model_implementations/clips_models.py +100 -0
- mteb/models/model_implementations/codefuse_models.py +162 -0
- mteb/models/model_implementations/codesage_models.py +15 -0
- mteb/models/model_implementations/cohere_models.py +8 -1
- mteb/models/model_implementations/cohere_v.py +5 -0
- mteb/models/model_implementations/colpali_models.py +14 -6
- mteb/models/model_implementations/colqwen_models.py +271 -1
- mteb/models/model_implementations/colsmol_models.py +2 -0
- mteb/models/model_implementations/conan_models.py +1 -0
- mteb/models/model_implementations/dino_models.py +171 -0
- mteb/models/model_implementations/e5_instruct.py +4 -0
- mteb/models/model_implementations/e5_models.py +12 -101
- mteb/models/model_implementations/e5_v.py +1 -0
- mteb/models/model_implementations/eagerworks_models.py +164 -0
- mteb/models/model_implementations/emillykkejensen_models.py +91 -0
- mteb/models/model_implementations/en_code_retriever.py +1 -0
- mteb/models/model_implementations/euler_models.py +32 -0
- mteb/models/model_implementations/evaclip_models.py +4 -0
- mteb/models/model_implementations/fa_models.py +58 -0
- mteb/models/model_implementations/facebookai.py +193 -0
- mteb/models/model_implementations/geogpt_models.py +1 -0
- mteb/models/model_implementations/gme_v_models.py +11 -5
- mteb/models/model_implementations/google_models.py +16 -5
- mteb/models/model_implementations/granite_vision_embedding_models.py +7 -2
- mteb/models/model_implementations/gritlm_models.py +2 -0
- mteb/models/model_implementations/gte_models.py +78 -0
- mteb/models/model_implementations/hinvec_models.py +1 -0
- mteb/models/model_implementations/human.py +1 -0
- mteb/models/model_implementations/ibm_granite_models.py +6 -0
- mteb/models/model_implementations/inf_models.py +2 -0
- mteb/models/model_implementations/jasper_models.py +255 -2
- mteb/models/model_implementations/jina_clip.py +1 -0
- mteb/models/model_implementations/jina_models.py +209 -5
- mteb/models/model_implementations/kalm_models.py +203 -25
- mteb/models/model_implementations/kblab.py +31 -0
- mteb/models/model_implementations/kennethenevoldsen_models.py +74 -0
- mteb/models/model_implementations/kfst.py +25 -0
- mteb/models/model_implementations/kowshik24_models.py +32 -0
- mteb/models/model_implementations/lens_models.py +2 -0
- mteb/models/model_implementations/lgai_embedding_models.py +1 -0
- mteb/models/model_implementations/linq_models.py +3 -2
- mteb/models/model_implementations/listconranker.py +1 -1
- mteb/models/model_implementations/llm2clip_models.py +3 -0
- mteb/models/model_implementations/llm2vec_models.py +8 -0
- mteb/models/model_implementations/mcinext_models.py +3 -0
- mteb/models/model_implementations/mdbr_models.py +2 -0
- mteb/models/model_implementations/misc_models.py +362 -0
- mteb/models/model_implementations/mme5_models.py +1 -0
- mteb/models/model_implementations/moco_models.py +11 -0
- mteb/models/model_implementations/mod_models.py +191 -0
- mteb/models/model_implementations/model2vec_models.py +13 -0
- mteb/models/model_implementations/moka_models.py +3 -0
- mteb/models/model_implementations/mxbai_models.py +9 -0
- mteb/models/model_implementations/nbailab.py +70 -0
- mteb/models/model_implementations/no_instruct_sentence_models.py +1 -0
- mteb/models/model_implementations/nomic_models.py +156 -4
- mteb/models/model_implementations/nomic_models_vision.py +7 -2
- mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py +23 -16
- mteb/models/model_implementations/nvidia_models.py +4 -1
- mteb/models/model_implementations/octen_models.py +195 -0
- mteb/models/model_implementations/openai_models.py +20 -16
- mteb/models/model_implementations/openclip_models.py +24 -0
- mteb/models/model_implementations/opensearch_neural_sparse_models.py +5 -0
- mteb/models/model_implementations/ops_moa_models.py +4 -2
- mteb/models/model_implementations/pawan_models.py +39 -0
- mteb/models/model_implementations/piccolo_models.py +8 -0
- mteb/models/model_implementations/promptriever_models.py +8 -4
- mteb/models/model_implementations/pylate_models.py +37 -4
- mteb/models/model_implementations/qodo_models.py +2 -0
- mteb/models/model_implementations/qtack_models.py +1 -0
- mteb/models/model_implementations/qwen3_models.py +6 -3
- mteb/models/model_implementations/qzhou_models.py +3 -1
- mteb/models/model_implementations/random_baseline.py +16 -21
- mteb/models/model_implementations/rasgaard_models.py +34 -0
- mteb/models/model_implementations/reasonir_model.py +1 -0
- mteb/models/model_implementations/repllama_models.py +2 -0
- mteb/models/model_implementations/rerankers_custom.py +3 -3
- mteb/models/model_implementations/rerankers_monot5_based.py +14 -14
- mteb/models/model_implementations/richinfoai_models.py +1 -0
- mteb/models/model_implementations/ru_sentence_models.py +51 -0
- mteb/models/model_implementations/ruri_models.py +322 -0
- mteb/models/model_implementations/salesforce_models.py +3 -0
- mteb/models/model_implementations/samilpwc_models.py +1 -0
- mteb/models/model_implementations/sarashina_embedding_models.py +168 -0
- mteb/models/model_implementations/searchmap_models.py +1 -0
- mteb/models/model_implementations/seed_1_6_embedding_models.py +8 -2
- mteb/models/model_implementations/seed_1_6_embedding_models_1215.py +658 -0
- mteb/models/model_implementations/seed_models.py +1 -0
- mteb/models/model_implementations/sentence_transformers_models.py +57 -0
- mteb/models/model_implementations/shuu_model.py +32 -31
- mteb/models/model_implementations/siglip_models.py +10 -0
- mteb/models/model_implementations/sonar_models.py +1 -0
- mteb/models/model_implementations/spartan8806_atles_champion.py +34 -0
- mteb/models/model_implementations/stella_models.py +6 -0
- mteb/models/model_implementations/tarka_models.py +376 -0
- mteb/models/model_implementations/ua_sentence_models.py +10 -0
- mteb/models/model_implementations/uae_models.py +1 -0
- mteb/models/model_implementations/vdr_models.py +2 -0
- mteb/models/model_implementations/vi_vn_models.py +39 -0
- mteb/models/model_implementations/vista_models.py +2 -0
- mteb/models/model_implementations/vlm2vec_models.py +2 -0
- mteb/models/model_implementations/voyage_models.py +15 -0
- mteb/models/model_implementations/voyage_v.py +8 -2
- mteb/models/model_implementations/xyz_models.py +1 -0
- mteb/models/model_implementations/youtu_models.py +1 -0
- mteb/models/model_implementations/yuan_models.py +34 -0
- mteb/models/model_implementations/yuan_models_en.py +58 -0
- mteb/models/model_meta.py +442 -22
- mteb/models/search_encoder_index/__init__.py +7 -0
- mteb/models/search_encoder_index/search_backend_protocol.py +50 -0
- mteb/models/search_encoder_index/search_indexes/__init__.py +5 -0
- mteb/models/search_encoder_index/search_indexes/faiss_search_index.py +157 -0
- mteb/models/search_wrappers.py +165 -48
- mteb/models/sentence_transformer_wrapper.py +2 -7
- mteb/results/benchmark_results.py +88 -47
- mteb/results/model_result.py +11 -4
- mteb/results/task_result.py +37 -19
- mteb/similarity_functions.py +49 -0
- mteb/tasks/bitext_mining/multilingual/__init__.py +2 -1
- mteb/tasks/bitext_mining/multilingual/bucc_bitext_mining.py +4 -2
- mteb/tasks/bitext_mining/multilingual/bucc_bitext_mining_fast.py +1 -1
- mteb/tasks/bitext_mining/multilingual/ru_sci_bench_bitext_mining.py +47 -5
- mteb/tasks/bitext_mining/multilingual/web_faq_bitext_mining.py +2 -6
- mteb/tasks/classification/ara/ajgt.py +1 -2
- mteb/tasks/classification/ara/hotel_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ara/online_store_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ara/restaurant_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ara/tweet_emotion_classification.py +1 -2
- mteb/tasks/classification/ara/tweet_sarcasm_classification.py +1 -2
- mteb/tasks/classification/ben/bengali_document_classification.py +1 -2
- mteb/tasks/classification/ben/bengali_hate_speech_classification.py +1 -2
- mteb/tasks/classification/ben/bengali_sentiment_analysis.py +1 -2
- mteb/tasks/classification/ces/csfdcz_movie_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ces/czech_product_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/ces/czech_so_me_sentiment_classification.py +1 -2
- mteb/tasks/classification/dan/angry_tweets_classification.py +1 -2
- mteb/tasks/classification/dan/danish_political_comments_classification.py +1 -2
- mteb/tasks/classification/dan/ddisco_cohesion_classification.py +1 -2
- mteb/tasks/classification/dan/dk_hate_classification.py +1 -2
- mteb/tasks/classification/deu/german_politicians_twitter_sentiment_classification.py +1 -2
- mteb/tasks/classification/deu/ten_k_gnad_classification.py +1 -2
- mteb/tasks/classification/eng/amazon_polarity_classification.py +1 -2
- mteb/tasks/classification/eng/arxiv_classification.py +1 -2
- mteb/tasks/classification/eng/banking77_classification.py +1 -2
- mteb/tasks/classification/eng/dbpedia_classification.py +1 -2
- mteb/tasks/classification/eng/emotion_classification.py +1 -2
- mteb/tasks/classification/eng/financial_phrasebank_classification.py +1 -2
- mteb/tasks/classification/eng/frenk_en_classification.py +1 -2
- mteb/tasks/classification/eng/gtsrb_classification.py +1 -1
- mteb/tasks/classification/eng/imdb_classification.py +1 -2
- mteb/tasks/classification/eng/legal_bench_classification.py +14 -120
- mteb/tasks/classification/eng/news_classification.py +1 -2
- mteb/tasks/classification/eng/patch_camelyon_classification.py +1 -1
- mteb/tasks/classification/eng/patent_classification.py +1 -2
- mteb/tasks/classification/eng/poem_sentiment_classification.py +1 -2
- mteb/tasks/classification/eng/sds_eye_protection_classification.py +1 -2
- mteb/tasks/classification/eng/sds_gloves_classification.py +1 -2
- mteb/tasks/classification/eng/toxic_chat_classification.py +2 -19
- mteb/tasks/classification/eng/toxic_conversations_classification.py +1 -2
- mteb/tasks/classification/eng/tweet_sentiment_extraction_classification.py +1 -2
- mteb/tasks/classification/eng/tweet_topic_single_classification.py +2 -13
- mteb/tasks/classification/eng/ucf101_classification.py +1 -5
- mteb/tasks/classification/eng/wikipedia_bio_met_chem_classification.py +1 -2
- mteb/tasks/classification/eng/wikipedia_chem_fields_classification.py +1 -2
- mteb/tasks/classification/eng/wikipedia_comp_chem_spectroscopy_classification.py +1 -2
- mteb/tasks/classification/eng/wikipedia_crystallography_analytical_classification.py +1 -2
- mteb/tasks/classification/eng/wikipedia_theoretical_applied_classification.py +1 -2
- mteb/tasks/classification/eng/yahoo_answers_topics_classification.py +1 -2
- mteb/tasks/classification/eng/yelp_review_full_classification.py +1 -2
- mteb/tasks/classification/est/estonian_valence.py +1 -2
- mteb/tasks/classification/fas/fa_mteb_classification.py +7 -14
- mteb/tasks/classification/fil/filipino_hate_speech_classification.py +1 -2
- mteb/tasks/classification/fin/fin_toxicity_classification.py +2 -11
- mteb/tasks/classification/fra/french_book_reviews.py +1 -2
- mteb/tasks/classification/fra/movie_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/guj/gujarati_news_classification.py +1 -2
- mteb/tasks/classification/heb/__init__.py +6 -1
- mteb/tasks/classification/heb/hebrew_sentiment_analysis.py +62 -4
- mteb/tasks/classification/hin/hindi_discourse_classification.py +1 -2
- mteb/tasks/classification/hin/sentiment_analysis_hindi.py +1 -2
- mteb/tasks/classification/hrv/frenk_hr_classification.py +1 -2
- mteb/tasks/classification/ind/indonesian_id_clickbait_classification.py +1 -2
- mteb/tasks/classification/ind/indonesian_mongabay_conservation_classification.py +1 -2
- mteb/tasks/classification/ita/italian_linguist_acceptability_classification.py +1 -2
- mteb/tasks/classification/jav/javanese_imdb_classification.py +1 -2
- mteb/tasks/classification/jpn/wrime_classification.py +1 -2
- mteb/tasks/classification/kan/kannada_news_classification.py +1 -2
- mteb/tasks/classification/kor/klue_tc.py +1 -2
- mteb/tasks/classification/kor/kor_hate_classification.py +2 -17
- mteb/tasks/classification/kor/kor_sarcasm_classification.py +2 -19
- mteb/tasks/classification/kur/kurdish_sentiment_classification.py +1 -2
- mteb/tasks/classification/mal/malayalam_news_classification.py +1 -2
- mteb/tasks/classification/mar/marathi_news_classification.py +1 -2
- mteb/tasks/classification/mkd/macedonian_tweet_sentiment_classification.py +1 -2
- mteb/tasks/classification/multilingual/catalonia_tweet_classification.py +1 -6
- mteb/tasks/classification/multilingual/multi_hate_classification.py +1 -4
- mteb/tasks/classification/multilingual/ru_sci_bench_classification.py +4 -23
- mteb/tasks/classification/multilingual/scala_classification.py +1 -2
- mteb/tasks/classification/multilingual/sib200_classification.py +1 -6
- mteb/tasks/classification/mya/myanmar_news.py +1 -2
- mteb/tasks/classification/nep/nepali_news_classification.py +1 -2
- mteb/tasks/classification/nld/dutch_book_review_sentiment_classification.py +4 -2
- mteb/tasks/classification/nld/dutch_cola_classification.py +3 -0
- mteb/tasks/classification/nld/dutch_government_bias_classification.py +3 -0
- mteb/tasks/classification/nld/dutch_news_articles_classification.py +3 -0
- mteb/tasks/classification/nld/dutch_sarcastic_headlines_classification.py +3 -0
- mteb/tasks/classification/nld/iconclass_classification.py +3 -0
- mteb/tasks/classification/nld/open_tender_classification.py +3 -0
- mteb/tasks/classification/nld/vaccin_chat_nl_classification.py +3 -0
- mteb/tasks/classification/nob/no_rec_classification.py +1 -2
- mteb/tasks/classification/nob/norwegian_parliament_classification.py +1 -2
- mteb/tasks/classification/ory/odia_news_classification.py +1 -2
- mteb/tasks/classification/pol/polish_classification.py +3 -6
- mteb/tasks/classification/ron/moroco.py +1 -2
- mteb/tasks/classification/ron/romanian_reviews_sentiment.py +1 -2
- mteb/tasks/classification/ron/romanian_sentiment_classification.py +1 -2
- mteb/tasks/classification/rus/georeview_classification.py +1 -2
- mteb/tasks/classification/rus/headline_classification.py +1 -2
- mteb/tasks/classification/rus/inappropriateness_classification.py +1 -2
- mteb/tasks/classification/rus/ru_reviews_classification.py +1 -2
- mteb/tasks/classification/rus/ru_toixic_classification_okmlcup.py +1 -2
- mteb/tasks/classification/rus/senti_ru_eval.py +1 -2
- mteb/tasks/classification/sin/sinhala_news_classification.py +1 -2
- mteb/tasks/classification/sin/sinhala_news_source_classification.py +1 -2
- mteb/tasks/classification/slk/csfdsk_movie_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/slk/slovak_hate_speech_classification.py +1 -2
- mteb/tasks/classification/slk/slovak_movie_review_sentiment_classification.py +1 -2
- mteb/tasks/classification/slv/frenk_sl_classification.py +1 -2
- mteb/tasks/classification/spa/spanish_news_classification.py +1 -2
- mteb/tasks/classification/spa/spanish_sentiment_classification.py +1 -2
- mteb/tasks/classification/ssw/siswati_news_classification.py +1 -2
- mteb/tasks/classification/swa/swahili_news_classification.py +1 -2
- mteb/tasks/classification/swe/dalaj_classification.py +1 -2
- mteb/tasks/classification/swe/swe_rec_classification.py +1 -2
- mteb/tasks/classification/swe/swedish_sentiment_classification.py +1 -2
- mteb/tasks/classification/tam/tamil_news_classification.py +1 -2
- mteb/tasks/classification/tel/telugu_andhra_jyoti_news_classification.py +1 -2
- mteb/tasks/classification/tha/wisesight_sentiment_classification.py +1 -2
- mteb/tasks/classification/tsn/tswana_news_classification.py +1 -2
- mteb/tasks/classification/tur/__init__.py +4 -0
- mteb/tasks/classification/tur/turkish_constitutional_court.py +41 -0
- mteb/tasks/classification/tur/turkish_movie_sentiment_classification.py +1 -2
- mteb/tasks/classification/tur/turkish_product_sentiment_classification.py +1 -2
- mteb/tasks/classification/ukr/ukr_formality_classification.py +2 -15
- mteb/tasks/classification/urd/urdu_roman_sentiment_classification.py +1 -2
- mteb/tasks/classification/vie/amazon_counterfactual_vn_classification.py +1 -6
- mteb/tasks/classification/vie/amazon_polarity_vn_classification.py +1 -6
- mteb/tasks/classification/vie/amazon_reviews_vn_classification.py +1 -5
- mteb/tasks/classification/vie/banking77_vn_classification.py +1 -5
- mteb/tasks/classification/vie/emotion_vn_classification.py +1 -5
- mteb/tasks/classification/vie/imdb_vn_classification.py +1 -5
- mteb/tasks/classification/vie/massive_intent_vn_classification.py +1 -5
- mteb/tasks/classification/vie/massive_scenario_vn_classification.py +1 -5
- mteb/tasks/classification/vie/mtop_domain_vn_classification.py +1 -5
- mteb/tasks/classification/vie/mtop_intent_vn_classification.py +1 -5
- mteb/tasks/classification/vie/toxic_conversations_vn_classification.py +1 -5
- mteb/tasks/classification/vie/tweet_sentiment_extraction_vn_classification.py +1 -5
- mteb/tasks/classification/vie/vie_student_feedback_classification.py +1 -2
- mteb/tasks/classification/zho/cmteb_classification.py +5 -10
- mteb/tasks/classification/zho/yue_openrice_review_classification.py +1 -2
- mteb/tasks/classification/zul/isi_zulu_news_classification.py +1 -2
- mteb/tasks/clustering/jpn/mews_c16_ja_clustering.py +1 -3
- mteb/tasks/clustering/multilingual/sib200_clustering_s2s.py +1 -6
- mteb/tasks/clustering/nld/dutch_news_articles_clustering_p2p.py +3 -0
- mteb/tasks/clustering/nld/dutch_news_articles_clustering_s2s.py +3 -0
- mteb/tasks/clustering/nld/iconclass_clustering_s2s.py +3 -0
- mteb/tasks/clustering/nld/open_tender_clustering_p2p.py +3 -0
- mteb/tasks/clustering/nld/open_tender_clustering_s2s.py +3 -0
- mteb/tasks/clustering/nld/vabb_clustering_p2p.py +3 -0
- mteb/tasks/clustering/nld/vabb_clustering_s2s.py +3 -0
- mteb/tasks/clustering/vie/reddit_clustering_p2p_vn.py +1 -5
- mteb/tasks/clustering/vie/reddit_clustering_vn.py +1 -5
- mteb/tasks/clustering/vie/stack_exchange_clustering_p2p_vn.py +1 -5
- mteb/tasks/clustering/vie/stack_exchange_clustering_vn.py +1 -5
- mteb/tasks/clustering/vie/twenty_newsgroups_clustering_vn.py +1 -5
- mteb/tasks/multilabel_classification/ita/emit_classification.py +1 -5
- mteb/tasks/multilabel_classification/kor/kor_hate_speech_ml_classification.py +1 -9
- mteb/tasks/multilabel_classification/mlt/maltese_news_classification.py +1 -6
- mteb/tasks/multilabel_classification/nld/covid_disinformation_nl_multi_label_classification.py +3 -0
- mteb/tasks/multilabel_classification/nld/vabb_multi_label_classification.py +3 -0
- mteb/tasks/multilabel_classification/por/brazilian_toxic_tweets_classification.py +1 -6
- mteb/tasks/multilabel_classification/swe/swedish_patent_cpc_group_classification.py +1 -1
- mteb/tasks/multilabel_classification/swe/swedish_patent_cpc_subclass_classification.py +1 -2
- mteb/tasks/pair_classification/dan/talemaader_pc.py +1 -6
- mteb/tasks/pair_classification/eng/legal_bench_pc.py +1 -9
- mteb/tasks/pair_classification/nld/sick_nl_pair_classification.py +3 -0
- mteb/tasks/pair_classification/nld/xlwic_nl_pair_classification.py +3 -0
- mteb/tasks/pair_classification/rus/__init__.py +2 -2
- mteb/tasks/pair_classification/rus/terra.py +51 -25
- mteb/tasks/pair_classification/vie/sprint_duplicate_questions_pcvn.py +1 -5
- mteb/tasks/pair_classification/vie/twitter_sem_eval2015_pcvn.py +1 -5
- mteb/tasks/pair_classification/vie/twitter_url_corpus_pcvn.py +1 -5
- mteb/tasks/regression/multilingual/ru_sci_bench_regression.py +2 -6
- mteb/tasks/reranking/jpn/__init__.py +9 -1
- mteb/tasks/reranking/jpn/j_qa_ra_reranking_lite.py +49 -0
- mteb/tasks/reranking/jpn/ja_cwir_reranking_lite.py +47 -0
- mteb/tasks/reranking/multilingual/__init__.py +2 -0
- mteb/tasks/reranking/multilingual/multi_long_doc_reranking.py +70 -0
- mteb/tasks/reranking/multilingual/x_glue_wpr_reranking.py +1 -2
- mteb/tasks/reranking/vie/ask_ubuntu_dup_questions_vn.py +1 -5
- mteb/tasks/reranking/vie/sci_docs_reranking_vn.py +1 -5
- mteb/tasks/reranking/vie/stack_overflow_dup_questions_vn.py +1 -5
- mteb/tasks/retrieval/code/fresh_stack_retrieval.py +8 -5
- mteb/tasks/retrieval/eng/lit_search_retrieval.py +1 -8
- mteb/tasks/retrieval/eng/vidore_bench_retrieval.py +4 -0
- mteb/tasks/retrieval/jpn/__init__.py +8 -0
- mteb/tasks/retrieval/jpn/ja_cwir_retrieval.py +1 -4
- mteb/tasks/retrieval/jpn/ja_cwir_retrieval_lite.py +47 -0
- mteb/tasks/retrieval/jpn/jaqket_retrieval_lite.py +50 -0
- mteb/tasks/retrieval/jpn/miracl_ja_retrieval_lite.py +52 -0
- mteb/tasks/retrieval/jpn/mr_tydi_ja_retrieval_lite.py +48 -0
- mteb/tasks/retrieval/kat/georgian_faq_retrieval.py +11 -4
- mteb/tasks/retrieval/kor/__init__.py +2 -1
- mteb/tasks/retrieval/kor/squad_kor_v1_retrieval.py +47 -0
- mteb/tasks/retrieval/multilingual/__init__.py +22 -0
- mteb/tasks/retrieval/multilingual/belebele_retrieval.py +5 -4
- mteb/tasks/retrieval/multilingual/jina_vdr_bench_retrieval.py +56 -42
- mteb/tasks/retrieval/multilingual/mkqa_retrieval.py +1 -2
- mteb/tasks/retrieval/multilingual/mlqa_retrieval.py +1 -4
- mteb/tasks/retrieval/multilingual/multi_long_doc_retrieval.py +1 -2
- mteb/tasks/retrieval/multilingual/public_health_qa_retrieval.py +9 -4
- mteb/tasks/retrieval/multilingual/ru_sci_bench_retrieval.py +2 -12
- mteb/tasks/retrieval/multilingual/vidore2_bench_retrieval.py +4 -2
- mteb/tasks/retrieval/multilingual/vidore3_bench_retrieval.py +399 -0
- mteb/tasks/retrieval/nld/__init__.py +8 -4
- mteb/tasks/retrieval/nld/argu_ana_nl_retrieval.py +46 -27
- mteb/tasks/retrieval/nld/bbsard_nl_retrieval.py +3 -0
- mteb/tasks/retrieval/nld/dutch_news_articles_retrieval.py +3 -0
- mteb/tasks/retrieval/nld/legal_qa_nl_retrieval.py +3 -0
- mteb/tasks/retrieval/nld/nf_corpus_nl_retrieval.py +42 -25
- mteb/tasks/retrieval/nld/open_tender_retrieval.py +3 -0
- mteb/tasks/retrieval/nld/sci_fact_nl_retrieval.py +42 -24
- mteb/tasks/retrieval/nld/scidocsnl_retrieval.py +44 -27
- mteb/tasks/retrieval/nld/vabb_retrieval.py +3 -0
- mteb/tasks/retrieval/slk/slovak_sum_retrieval.py +1 -7
- mteb/tasks/retrieval/vie/argu_ana_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/climate_fevervn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_android_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_gis_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_mathematica_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_physics_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_programmers_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_stats_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_tex_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_unix_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_webmasters_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/cqa_dupstack_wordpress_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/db_pedia_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/fevervn_retrieval.py +1 -7
- mteb/tasks/retrieval/vie/fi_qa2018_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/green_node_table_markdown_retrieval.py +16 -1
- mteb/tasks/retrieval/vie/hotpot_qavn_retrieval.py +1 -6
- mteb/tasks/retrieval/vie/msmarcovn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/nf_corpus_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/nqvn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/quora_vn_retrieval.py +1 -6
- mteb/tasks/retrieval/vie/sci_fact_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/scidocsvn_retrieval.py +1 -6
- mteb/tasks/retrieval/vie/touche2020_vn_retrieval.py +1 -5
- mteb/tasks/retrieval/vie/treccovidvn_retrieval.py +1 -5
- mteb/tasks/sts/nld/sick_nl_sts.py +1 -0
- mteb/tasks/sts/vie/biosses_stsvn.py +1 -5
- mteb/tasks/sts/vie/sickr_stsvn.py +1 -5
- mteb/tasks/sts/vie/sts_benchmark_stsvn.py +1 -5
- mteb/tasks/zeroshot_classification/eng/gtsrb.py +1 -1
- mteb/tasks/zeroshot_classification/eng/patch_camelyon.py +1 -1
- mteb/tasks/zeroshot_classification/eng/ucf101.py +1 -5
- mteb/types/_encoder_io.py +7 -2
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/METADATA +11 -5
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/RECORD +457 -391
- mteb/models/model_implementations/nb_sbert.py +0 -25
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/WHEEL +0 -0
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/entry_points.txt +0 -0
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/licenses/LICENSE +0 -0
- {mteb-2.1.4.dist-info → mteb-2.5.2.dist-info}/top_level.txt +0 -0
mteb/models/model_meta.py
CHANGED
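
Note on the changes below: the diff adds Hub-backed constructors (ModelMeta.from_hub, ModelMeta.from_sentence_transformer_model, ModelMeta.from_cross_encoder), a model_type field that supersedes the deprecated is_cross_encoder flag, and a to_python() serializer. A minimal usage sketch, assuming network access to the Hugging Face Hub; the method and field names are taken from the diff, the model id is the example used in the from_hub docstring, and everything else is illustrative:

from mteb.models.model_meta import ModelMeta

# Build metadata directly from the Hub. release_date comes from the repo's first
# commit; n_parameters and memory_usage_mb are derived from safetensors metadata.
meta = ModelMeta.from_hub("intfloat/multilingual-e5-large")
print(meta.release_date)        # date of the first commit in the model repository
print(meta.similarity_fn_name)  # from config_sentence_transformers.json, else COSINE
print(meta.to_python())         # constructor-style Python source for this metadata

Passing is_cross_encoder=True to ModelMeta still works, but it now emits a DeprecationWarning and is rewritten to model_type=["cross-encoder"] by the before-mode validator shown in the diff.
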
@@ -1,25 +1,46 @@
+from __future__ import annotations
+
+import json
 import logging
+import warnings
 from collections.abc import Callable, Sequence
 from dataclasses import field
 from enum import Enum
+from functools import partial
+from pathlib import Path
 from typing import TYPE_CHECKING, Any, Literal, cast

-from huggingface_hub import
+from huggingface_hub import (
+    GitCommitInfo,
+    ModelCard,
+    ModelCardData,
+    get_safetensors_metadata,
+    hf_hub_download,
+    list_repo_commits,
+    repo_exists,
+)
 from huggingface_hub.errors import (
+    EntryNotFoundError,
     GatedRepoError,
     NotASafetensorsRepoError,
+    RepositoryNotFoundError,
     SafetensorsParsingError,
 )
-from pydantic import BaseModel, ConfigDict, field_validator
+from pydantic import BaseModel, ConfigDict, field_validator, model_validator
+from transformers import AutoConfig
+from typing_extensions import Self

+from mteb._helpful_enum import HelpfulStrEnum
 from mteb.languages import check_language_code
+from mteb.models.models_protocols import EncoderProtocol, MTEBModels
 from mteb.types import ISOLanguageScript, Licenses, Modalities, StrDate, StrURL

-from .models_protocols import EncoderProtocol, MTEBModels
-
 if TYPE_CHECKING:
+    from sentence_transformers import CrossEncoder, SentenceTransformer
+
     from mteb.abstasks import AbsTask

+
 logger = logging.getLogger(__name__)

 FRAMEWORKS = Literal[
@@ -36,8 +57,10 @@ FRAMEWORKS = Literal[
     "ColPali",
 ]

+MODEL_TYPES = Literal["dense", "cross-encoder", "late-interaction"]
+

-class ScoringFunction(
+class ScoringFunction(HelpfulStrEnum):
     """The scoring function used by the models."""

     COSINE = "cosine"
@@ -58,6 +81,9 @@ def _get_loader_name(
         return loader.__name__


+_SENTENCE_TRANSFORMER_LIB_NAME = "Sentence Transformers"
+
+
 class ModelMeta(BaseModel):
     """The model metadata object.

@@ -72,7 +98,7 @@ class ModelMeta(BaseModel):
             models).
         embed_dim: The dimension of the embeddings produced by the model. Currently all models are assumed to produce fixed-size embeddings.
         revision: The revision number of the model. If None, it is assumed that the metadata (including the loader) is valid for all revisions of the model.
-        release_date: The date the model's revision was released.
+        release_date: The date the model's revision was released. If None, then release date will be added based on 1st commit in hf repository of model.
         license: The license under which the model is released. Required if open_weights is True.
         open_weights: Whether the model is open source or proprietary.
         public_training_code: A link to the publicly available training code. If None, it is assumed that the training code is not publicly available.
@@ -90,7 +116,7 @@ class ModelMeta(BaseModel):
             a benchmark as well as mark dataset contaminations.
         adapted_from: Name of the model from which this model is adapted. For quantizations, fine-tunes, long doc extensions, etc.
         superseded_by: Name of the model that supersedes this model, e.g., nvidia/NV-Embed-v2 supersedes v1.
-
+        model_type: A list of strings representing the type of model.
         modalities: A list of strings representing the modalities the model supports. Default is ["text"].
         contacts: The people to contact in case of a problem in the model, preferably a GitHub handle.
     """
@@ -120,10 +146,49 @@ class ModelMeta(BaseModel):
     adapted_from: str | None = None
     superseded_by: str | None = None
     modalities: list[Modalities] = ["text"]
-
+    model_type: list[MODEL_TYPES] = ["dense"]
     citation: str | None = None
     contacts: list[str] | None = None

+    @model_validator(mode="before")
+    @classmethod
+    def handle_legacy_is_cross_encoder(cls, data: Any) -> Any:
+        """Handle legacy is_cross_encoder field by converting it to model_type.
+
+        This validator handles backward compatibility for the deprecated is_cross_encoder field.
+        If is_cross_encoder=True is provided, it adds "cross_encoder" to model_type.
+        """
+        if isinstance(data, dict) and "is_cross_encoder" in data:
+            is_cross_encoder_value = data.pop("is_cross_encoder")
+
+            if is_cross_encoder_value is not None:
+                warnings.warn(
+                    "is_cross_encoder is deprecated and will be removed in a future version. "
+                    "Use model_type=['cross-encoder'] instead.",
+                    DeprecationWarning,
+                    stacklevel=2,
+                )
+
+            model_type = data.get("model_type", ["dense"])
+
+            if is_cross_encoder_value:
+                if "cross-encoder" not in model_type:
+                    data["model_type"] = ["cross-encoder"]
+            else:
+                if "cross-encoder" in model_type:
+                    model_type = [t for t in model_type if t != "cross-encoder"]
+                    data["model_type"] = model_type if model_type else ["dense"]
+
+        return data
+
+    @property
+    def is_cross_encoder(self) -> bool:
+        """Returns True if the model is a cross-encoder.
+
+        Derived from model_type field. A model is considered a cross-encoder if "cross-encoder" is in its model_type list.
+        """
+        return "cross-encoder" in self.model_type
+
     @field_validator("similarity_fn_name", mode="before")
     @classmethod
     def _validate_similarity_fn_name(cls, value: str) -> ScoringFunction | None:
@@ -159,6 +224,7 @@ class ModelMeta(BaseModel):
             else dict_repr["training_datasets"]
         )
         dict_repr["loader"] = _get_loader_name(loader)
+        dict_repr["is_cross_encoder"] = self.is_cross_encoder
         return dict_repr

     @field_validator("languages")
@@ -212,9 +278,199 @@ class ModelMeta(BaseModel):
             raise ValueError("Model name is not set")
         return self.name.replace("/", "__").replace(" ", "_")

-
-
-
+    @classmethod
+    def _from_hub(
+        cls,
+        model_name: str | None,
+        revision: str | None = None,
+        compute_metadata: bool = True,
+    ) -> Self:
+        """Generates a ModelMeta from a HuggingFace model name.
+
+        Args:
+            model_name: The HuggingFace model name.
+            revision: Revision of the model
+            compute_metadata: Add metadata based on model card
+
+        Returns:
+            The generated ModelMeta.
+        """
+        from mteb.models import sentence_transformers_loader
+
+        loader = sentence_transformers_loader
+        frameworks: list[FRAMEWORKS] = ["PyTorch"]
+        model_license = None
+        reference = None
+        n_parameters = None
+        memory_usage_mb = None
+        release_date = None
+        embedding_dim = None
+        max_tokens = None
+
+        if model_name and compute_metadata and repo_exists(model_name):
+            reference = "https://huggingface.co/" + model_name
+            card = ModelCard.load(model_name)
+            card_data: ModelCardData = card.data
+            try:
+                model_config = AutoConfig.from_pretrained(model_name)
+            except Exception as e:
+                # some models can't load AutoConfig (e.g. `average_word_embeddings_levy_dependency`)
+                model_config = None
+                logger.warning(f"Can't get configuration for {model_name}. Error: {e}")
+
+            if (
+                card_data.library_name == _SENTENCE_TRANSFORMER_LIB_NAME
+                or _SENTENCE_TRANSFORMER_LIB_NAME in card_data.tags
+            ):
+                frameworks.append(_SENTENCE_TRANSFORMER_LIB_NAME)
+            else:
+                msg = "Model library not recognized, defaulting to Sentence Transformers loader."
+                logger.warning(msg)
+                warnings.warn(msg)
+
+            if revision is None:
+                revisions = _get_repo_commits(model_name, "model")
+                revision = revisions[0].commit_id if revisions else None
+
+            release_date = cls.fetch_release_date(model_name)
+            model_license = card_data.license
+            n_parameters = cls._calculate_num_parameters_from_hub(model_name)
+            memory_usage_mb = cls._calculate_memory_usage_mb(model_name, n_parameters)
+            if model_config and hasattr(model_config, "hidden_size"):
+                embedding_dim = model_config.hidden_size
+            if model_config and hasattr(model_config, "max_position_embeddings"):
+                max_tokens = model_config.max_position_embeddings
+
+        return cls(
+            loader=loader,
+            name=model_name or "no_model_name/available",
+            revision=revision or "no_revision_available",
+            reference=reference,
+            release_date=release_date,
+            languages=None,
+            license=model_license,
+            framework=frameworks,
+            training_datasets=None,
+            similarity_fn_name=None,
+            n_parameters=n_parameters,
+            memory_usage_mb=memory_usage_mb,
+            max_tokens=max_tokens,
+            embed_dim=embedding_dim,
+            open_weights=True,
+            public_training_code=None,
+            public_training_data=None,
+            use_instructions=None,
+            modalities=[],
+        )
+
+    @classmethod
+    def from_sentence_transformer_model(
+        cls,
+        model: SentenceTransformer,
+        revision: str | None = None,
+        compute_metadata: bool = True,
+    ) -> Self:
+        """Generates a ModelMeta from a SentenceTransformer model.
+
+        Args:
+            model: SentenceTransformer model.
+            revision: Revision of the model
+            compute_metadata: Add metadata based on model card
+
+        Returns:
+            The generated ModelMeta.
+        """
+        name: str | None = (
+            model.model_card_data.model_name
+            if model.model_card_data.model_name
+            else model.model_card_data.base_model
+        )
+        meta = cls._from_hub(name, revision, compute_metadata)
+        if _SENTENCE_TRANSFORMER_LIB_NAME not in meta.framework:
+            meta.framework.append("Sentence Transformers")
+        meta.revision = model.model_card_data.base_model_revision or meta.revision
+        meta.max_tokens = model.max_seq_length
+        meta.embed_dim = model.get_sentence_embedding_dimension()
+        meta.similarity_fn_name = ScoringFunction.from_str(model.similarity_fn_name)
+        meta.modalities = ["text"]
+        return meta
+
+    @classmethod
+    def from_hub(
+        cls,
+        model: str,
+        revision: str | None = None,
+        compute_metadata: bool = True,
+    ) -> Self:
+        """Generates a ModelMeta for model from HuggingFace hub.
+
+        Args:
+            model: Name of the model from HuggingFace hub. For example, `intfloat/multilingual-e5-large`
+            revision: Revision of the model
+            compute_metadata: Add metadata based on model card
+
+        Returns:
+            The generated ModelMeta.
+        """
+        meta = cls._from_hub(model, revision, compute_metadata)
+        if _SENTENCE_TRANSFORMER_LIB_NAME not in meta.framework:
+            meta.framework.append("Sentence Transformers")
+        meta.modalities = ["text"]
+
+        if model and compute_metadata and repo_exists(model):
+            # have max_seq_length field
+            sbert_config = _get_json_from_hub(
+                model, "sentence_bert_config.json", "model", revision=revision
+            )
+            if sbert_config:
+                meta.max_tokens = (
+                    sbert_config.get("max_seq_length", None) or meta.max_tokens
+                )
+            # have model type, similarity function fields
+            config_sbert = _get_json_from_hub(
+                model, "config_sentence_transformers.json", "model", revision=revision
+            )
+            if (
+                config_sbert is not None
+                and config_sbert.get("similarity_fn_name") is not None
+            ):
+                meta.similarity_fn_name = ScoringFunction.from_str(
+                    config_sbert.get("similarity_fn_name")
+                )
+            else:
+                meta.similarity_fn_name = ScoringFunction.COSINE
+        return meta
+
+    @classmethod
+    def from_cross_encoder(
+        cls,
+        model: CrossEncoder,
+        revision: str | None = None,
+        compute_metadata: bool = True,
+    ) -> Self:
+        """Generates a ModelMeta from a CrossEncoder.
+
+        Args:
+            model: The CrossEncoder model
+            revision: Revision of the model
+            compute_metadata: Add metadata based on model card
+
+        Returns:
+            The generated ModelMeta
+        """
+        from mteb.models import CrossEncoderWrapper
+
+        meta = cls._from_hub(model.model.name_or_path, revision, compute_metadata)
+        if _SENTENCE_TRANSFORMER_LIB_NAME not in meta.framework:
+            meta.framework.append("Sentence Transformers")
+        meta.revision = model.config._commit_hash or meta.revision
+        meta.loader = CrossEncoderWrapper
+        meta.embed_dim = None
+        meta.modalities = ["text"]
+        meta.model_type = ["cross-encoder"]
+        return meta
+
+    def is_zero_shot_on(self, tasks: Sequence[AbsTask] | Sequence[str]) -> bool | None:
         """Indicates whether the given model can be considered zero-shot or not on the given tasks.

         Returns:
@@ -267,7 +523,7 @@ class ModelMeta(BaseModel):
         return return_dataset

     def zero_shot_percentage(
-        self, tasks: Sequence[
+        self, tasks: Sequence[AbsTask] | Sequence[str]
     ) -> int | None:
         """Indicates how out-of-domain the selected tasks are for the given model.

@@ -290,18 +546,38 @@ class ModelMeta(BaseModel):
         perc_overlap = 100 * (len(overlap) / len(benchmark_datasets))
         return int(100 - perc_overlap)

-
-
+    @staticmethod
+    def _calculate_num_parameters_from_hub(model_name: str | None = None) -> int | None:
+        try:
+            safetensors_metadata = get_safetensors_metadata(model_name)
+            if len(safetensors_metadata.parameter_count) >= 0:
+                return sum(safetensors_metadata.parameter_count.values())
+        except (
+            NotASafetensorsRepoError,
+            SafetensorsParsingError,
+            GatedRepoError,
+            RepositoryNotFoundError,
+        ) as e:
+            logger.warning(
+                f"Can't calculate number of parameters for {model_name}. Got error {e}"
+            )
+        return None
+
+    def calculate_num_parameters_from_hub(self) -> int | None:
+        """Calculates the number of parameters in the model.

         Returns:
-
+            Number of parameters in the model.
         """
-
-        return None
+        return self._calculate_num_parameters_from_hub(self.name)

+    @staticmethod
+    def _calculate_memory_usage_mb(
+        model_name: str, n_parameters: int | None
+    ) -> int | None:
         MB = 1024**2  # noqa: N806
         try:
-            safetensors_metadata = get_safetensors_metadata(
+            safetensors_metadata = get_safetensors_metadata(model_name)
             if len(safetensors_metadata.parameter_count) >= 0:
                 dtype_size_map = {
                     "F64": 8,  # 64-bit float
@@ -320,18 +596,130 @@ class ModelMeta(BaseModel):
                     for dtype, parameters in safetensors_metadata.parameter_count.items()
                 )
                 return round(total_memory_bytes / MB)  # Convert to MB
+        except (
+            NotASafetensorsRepoError,
+            SafetensorsParsingError,
+            GatedRepoError,
+            RepositoryNotFoundError,
+        ) as e:
+            logger.warning(
+                f"Can't calculate memory usage for {model_name}. Got error {e}"
+            )

-
-            pass
-        if self.n_parameters is None:
+        if n_parameters is None:
             return None
         # Model memory in bytes. For FP32 each parameter is 4 bytes.
-        model_memory_bytes =
+        model_memory_bytes = n_parameters * 4

         # Convert to MB
         model_memory_mb = model_memory_bytes / MB
         return round(model_memory_mb)

+    def calculate_memory_usage_mb(self) -> int | None:
+        """Calculates the memory usage of the model in MB.
+
+        Returns:
+            The memory usage of the model in MB, or None if it cannot be determined.
+        """
+        if "API" in self.framework or self.name is None:
+            return None
+
+        return self._calculate_memory_usage_mb(self.model_name, self.n_parameters)
+
+    @staticmethod
+    def fetch_release_date(model_name: str) -> StrDate | None:
+        """Fetches the release date from HuggingFace Hub based on the first commit.
+
+        Returns:
+            The release date in YYYY-MM-DD format, or None if it cannot be determined.
+        """
+        commits = _get_repo_commits(repo_id=model_name, repo_type="model")
+        if commits:
+            initial_commit = commits[-1]
+            release_date = initial_commit.created_at.strftime("%Y-%m-%d")
+            return release_date
+        return None
+
+    def to_python(self) -> str:
+        """Returns a string representation of the model."""
+        return _pydantic_instance_to_code(self)
+
+
+def _pydantic_instance_to_code(
+    model: BaseModel,
+    indent: int = 4,
+    *,
+    only_set_fields: bool = False,
+) -> str:
+    """Convert a Pydantic model instance into valid Python constructor code.
+
+    If only_set_fields=True, only fields explicitly provided at model construction
+    time are printed (i.e., excludes fields that came only from defaults).
+
+    Arguments:
+        model: The Pydantic model to convert.
+        indent: The indentation to use.
+        only_set_fields: If True, only fields explicitly provided at model construction time
+    """
+    cls_name = model.__class__.__name__
+    pad = " " * indent
+    lines: list[str] = [f"{cls_name}("]
+
+    model_fields = list(type(model).model_fields.keys())
+
+    if only_set_fields:
+        field_names = [n for n in model_fields if n in model.model_fields_set]
+    else:
+        field_names = model_fields
+
+    for field_name in field_names:
+        value = getattr(model, field_name)
+        value_code = _value_to_code(value, indent)
+        lines.append(f"{pad}{field_name}={value_code},")
+
+    lines.append(")")
+    return "\n".join(lines)
+
+
+def _value_to_code(value: Any, indent: int) -> str:
+    """Convert a Python value into valid Python source code."""
+    if isinstance(value, BaseModel):
+        return _pydantic_instance_to_code(value, indent, only_set_fields=True)
+
+    if callable(value):
+        if isinstance(value, partial):
+            return value.func.__name__
+        return value.__name__
+
+    if isinstance(value, Enum):
+        return f"{value.__class__.__name__}.{value.name}"
+
+    if isinstance(value, str):
+        return repr(value)
+
+    if isinstance(value, list):
+        if not value:
+            return "[]"
+        inner = ", ".join(_value_to_code(v, indent) for v in value)
+        return f"[{inner}]"
+
+    if isinstance(value, set):
+        if not value:
+            return "set()"
+        inner = ", ".join(_value_to_code(v, indent) for v in sorted(value))
+        return f"{{{inner}}}"
+
+    if isinstance(value, dict):
+        if not value:
+            return "{}"
+        inner = ", ".join(
+            f"{_value_to_code(k, indent)}: {_value_to_code(v, indent)}"
+            for k, v in value.items()
+        )
+        return f"{{{inner}}}"
+
+    return repr(value)
+

 def _collect_similar_tasks(dataset: str, visited: set[str]) -> set[str]:
     """Recursively collect all similar tasks for a given dataset.
@@ -364,3 +752,35 @@ def _collect_similar_tasks(dataset: str, visited: set[str]) -> set[str]:
         similar.update(_collect_similar_tasks(parent, visited))

     return similar
+
+
+def _get_repo_commits(repo_id: str, repo_type: str) -> list[GitCommitInfo] | None:
+    try:
+        return list_repo_commits(repo_id=repo_id, repo_type=repo_type)
+    except (GatedRepoError, RepositoryNotFoundError) as e:
+        logger.warning(f"Can't get commits of {repo_id}: {e}")
+        return None
+
+
+def _get_json_from_hub(
+    repo_id: str, file_name: str, repo_type: str, revision: str | None = None
+) -> dict[str, Any] | None:
+    path = _get_file_on_hub(repo_id, file_name, repo_type, revision)
+    if path is None:
+        return None
+
+    with Path(path).open() as f:
+        js = json.load(f)
+    return js
+
+
+def _get_file_on_hub(
+    repo_id: str, file_name: str, repo_type: str, revision: str | None = None
+) -> str | None:
+    try:
+        return hf_hub_download(
+            repo_id=repo_id, filename=file_name, repo_type=repo_type, revision=revision
+        )
+    except (GatedRepoError, RepositoryNotFoundError, EntryNotFoundError) as e:
+        logger.warning(f"Can't get file {file_name} of {repo_id}: {e}")
+        return None

mteb/models/search_encoder_index/search_backend_protocol.py
ADDED
@@ -0,0 +1,50 @@
+from collections.abc import Callable
+from typing import Protocol
+
+from mteb.types import Array, TopRankedDocumentsType
+
+
+class IndexEncoderSearchProtocol(Protocol):
+    """Protocol for search backends used in encoder-based retrieval."""
+
+    def add_documents(
+        self,
+        embeddings: Array,
+        idxs: list[str],
+    ) -> None:
+        """Add documents to the search backend.
+
+        Args:
+            embeddings: Embeddings of the documents to add.
+            idxs: IDs of the documents to add.
+        """
+
+    def search(
+        self,
+        embeddings: Array,
+        top_k: int,
+        similarity_fn: Callable[[Array, Array], Array],
+        top_ranked: TopRankedDocumentsType | None = None,
+        query_idx_to_id: dict[int, str] | None = None,
+    ) -> tuple[list[list[float]], list[list[int]]]:
+        """Search through added corpus embeddings or rerank top-ranked documents.
+
+        Supports both full-corpus and reranking search modes:
+        - Full-corpus mode: `top_ranked=None`, uses added corpus embeddings.
+        - Reranking mode: `top_ranked` contains mapping {query_id: [doc_ids]}.
+
+        Args:
+            embeddings: Query embeddings, shape (num_queries, dim).
+            top_k: Number of top results to return.
+            similarity_fn: Function to compute similarity between query and corpus.
+            top_ranked: Mapping of query_id -> list of candidate doc_ids. Used for reranking.
+            query_idx_to_id: Mapping of query index -> query_id. Used for reranking.
+
+        Returns:
+            A tuple (top_k_values, top_k_indices), for each query:
+            - top_k_values: List of top-k similarity scores.
+            - top_k_indices: List of indices of the top-k documents in the added corpus.
+        """
+
+    def clear(self) -> None:
+        """Clear all stored documents and embeddings from the backend."""