mteb 2.1.4__py3-none-any.whl → 2.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (527):
  1. mteb/__init__.py +6 -0
  2. mteb/_create_dataloaders.py +22 -20
  3. mteb/_evaluators/any_sts_evaluator.py +23 -14
  4. mteb/_evaluators/classification_metrics.py +54 -0
  5. mteb/_evaluators/clustering_evaluator.py +3 -3
  6. mteb/_evaluators/evaluator.py +4 -2
  7. mteb/_evaluators/image/imagetext_pairclassification_evaluator.py +18 -11
  8. mteb/_evaluators/pair_classification_evaluator.py +34 -40
  9. mteb/_evaluators/retrieval_evaluator.py +2 -2
  10. mteb/_evaluators/retrieval_metrics.py +18 -17
  11. mteb/_evaluators/sklearn_evaluator.py +25 -37
  12. mteb/_evaluators/text/bitext_mining_evaluator.py +31 -19
  13. mteb/_evaluators/text/summarization_evaluator.py +27 -20
  14. mteb/_evaluators/zeroshot_classification_evaluator.py +7 -5
  15. mteb/abstasks/_data_filter/__init__.py +0 -0
  16. mteb/abstasks/_data_filter/filters.py +125 -0
  17. mteb/abstasks/_data_filter/task_pipelines.py +105 -0
  18. mteb/abstasks/_statistics_calculation.py +23 -11
  19. mteb/abstasks/_stratification.py +18 -18
  20. mteb/abstasks/abstask.py +35 -28
  21. mteb/abstasks/aggregate_task_metadata.py +1 -9
  22. mteb/abstasks/aggregated_task.py +10 -29
  23. mteb/abstasks/classification.py +15 -12
  24. mteb/abstasks/clustering.py +20 -16
  25. mteb/abstasks/clustering_legacy.py +13 -10
  26. mteb/abstasks/image/image_text_pair_classification.py +7 -4
  27. mteb/abstasks/multilabel_classification.py +33 -22
  28. mteb/abstasks/pair_classification.py +27 -11
  29. mteb/abstasks/regression.py +4 -4
  30. mteb/abstasks/retrieval.py +28 -24
  31. mteb/abstasks/retrieval_dataset_loaders.py +2 -2
  32. mteb/abstasks/sts.py +14 -4
  33. mteb/abstasks/task_metadata.py +32 -33
  34. mteb/abstasks/text/bitext_mining.py +39 -28
  35. mteb/abstasks/text/reranking.py +8 -6
  36. mteb/abstasks/text/summarization.py +10 -5
  37. mteb/abstasks/zeroshot_classification.py +8 -4
  38. mteb/benchmarks/_create_table.py +84 -37
  39. mteb/benchmarks/benchmark.py +77 -16
  40. mteb/benchmarks/benchmarks/__init__.py +12 -0
  41. mteb/benchmarks/benchmarks/benchmarks.py +361 -16
  42. mteb/benchmarks/get_benchmark.py +14 -53
  43. mteb/cache.py +227 -37
  44. mteb/cli/_display_tasks.py +2 -2
  45. mteb/cli/build_cli.py +110 -14
  46. mteb/cli/generate_model_card.py +43 -23
  47. mteb/deprecated_evaluator.py +71 -62
  48. mteb/descriptive_stats/BitextMining/RuSciBenchBitextMining.v2.json +61 -0
  49. mteb/descriptive_stats/Classification/HebrewSentimentAnalysis.v3.json +60 -0
  50. mteb/descriptive_stats/Classification/TurkishConstitutionalCourtViolation.json +54 -0
  51. mteb/descriptive_stats/Image/DocumentUnderstanding/KoVidore2CybersecurityRetrieval.json +32 -0
  52. mteb/descriptive_stats/Image/DocumentUnderstanding/KoVidore2EconomicRetrieval.json +32 -0
  53. mteb/descriptive_stats/Image/DocumentUnderstanding/KoVidore2EnergyRetrieval.json +32 -0
  54. mteb/descriptive_stats/Image/DocumentUnderstanding/KoVidore2HrRetrieval.json +32 -0
  55. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3ComputerScienceRetrieval.json +214 -0
  56. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3EnergyRetrieval.json +214 -0
  57. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3FinanceEnRetrieval.json +214 -0
  58. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3FinanceFrRetrieval.json +214 -0
  59. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3HrRetrieval.json +214 -0
  60. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3IndustrialRetrieval.json +214 -0
  61. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3NuclearRetrieval.json +214 -0
  62. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3PharmaceuticalsRetrieval.json +214 -0
  63. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3PhysicsRetrieval.json +214 -0
  64. mteb/descriptive_stats/Image/DocumentUnderstanding/Vidore3TelecomRetrieval.json +214 -0
  65. mteb/descriptive_stats/PairClassification/TERRa.V2.json +35 -0
  66. mteb/descriptive_stats/Reranking/JQaRARerankingLite.json +35 -0
  67. mteb/descriptive_stats/Reranking/JaCWIRRerankingLite.json +35 -0
  68. mteb/descriptive_stats/Reranking/MultiLongDocReranking.json +466 -0
  69. mteb/descriptive_stats/Retrieval/ArguAna-NL.v2.json +30 -0
  70. mteb/descriptive_stats/Retrieval/ChemRxivRetrieval.json +30 -0
  71. mteb/descriptive_stats/Retrieval/EuroPIRQRetrieval.json +116 -0
  72. mteb/descriptive_stats/Retrieval/JaCWIRRetrievalLite.json +30 -0
  73. mteb/descriptive_stats/Retrieval/JaqketRetrievalLite.json +30 -0
  74. mteb/descriptive_stats/Retrieval/MIRACLJaRetrievalLite.json +30 -0
  75. mteb/descriptive_stats/Retrieval/MrTyDiJaRetrievalLite.json +30 -0
  76. mteb/descriptive_stats/Retrieval/NFCorpus-NL.v2.json +30 -0
  77. mteb/descriptive_stats/Retrieval/NanoClimateFEVER-VN.json +30 -0
  78. mteb/descriptive_stats/Retrieval/NanoDBPedia-VN.json +30 -0
  79. mteb/descriptive_stats/Retrieval/NanoFEVER-VN.json +30 -0
  80. mteb/descriptive_stats/Retrieval/NanoHotpotQA-VN.json +30 -0
  81. mteb/descriptive_stats/Retrieval/NanoMSMARCO-VN.json +30 -0
  82. mteb/descriptive_stats/Retrieval/NanoNQ-VN.json +30 -0
  83. mteb/descriptive_stats/Retrieval/SCIDOCS-NL.v2.json +30 -0
  84. mteb/descriptive_stats/Retrieval/SQuADKorV1Retrieval.json +30 -0
  85. mteb/descriptive_stats/Retrieval/SciFact-NL.v2.json +30 -0
  86. mteb/descriptive_stats/Retrieval/TVPLRetrieval.json +30 -0
  87. mteb/evaluate.py +106 -75
  88. mteb/filter_tasks.py +25 -26
  89. mteb/get_tasks.py +29 -30
  90. mteb/languages/language_scripts.py +5 -3
  91. mteb/leaderboard/app.py +414 -151
  92. mteb/leaderboard/benchmark_selector.py +14 -5
  93. mteb/leaderboard/figures.py +13 -15
  94. mteb/leaderboard/table.py +82 -17
  95. mteb/load_results.py +12 -12
  96. mteb/models/__init__.py +4 -1
  97. mteb/models/abs_encoder.py +31 -23
  98. mteb/models/cache_wrappers/__init__.py +2 -1
  99. mteb/models/cache_wrappers/cache_backend_protocol.py +3 -5
  100. mteb/models/cache_wrappers/cache_backends/_hash_utils.py +7 -6
  101. mteb/models/cache_wrappers/cache_backends/faiss_cache.py +6 -2
  102. mteb/models/cache_wrappers/cache_backends/numpy_cache.py +43 -25
  103. mteb/models/cache_wrappers/cache_wrapper.py +3 -3
  104. mteb/models/get_model_meta.py +25 -118
  105. mteb/models/instruct_wrapper.py +33 -9
  106. mteb/models/model_implementations/align_models.py +8 -1
  107. mteb/models/model_implementations/amazon_models.py +1 -0
  108. mteb/models/model_implementations/andersborges.py +65 -0
  109. mteb/models/model_implementations/ara_models.py +9 -1
  110. mteb/models/model_implementations/arctic_models.py +16 -8
  111. mteb/models/model_implementations/b1ade_models.py +2 -1
  112. mteb/models/model_implementations/bedrock_models.py +4 -0
  113. mteb/models/model_implementations/bge_models.py +101 -17
  114. mteb/models/model_implementations/bica_model.py +35 -0
  115. mteb/models/model_implementations/blip2_models.py +13 -2
  116. mteb/models/model_implementations/blip_models.py +43 -16
  117. mteb/models/model_implementations/bm25.py +5 -4
  118. mteb/models/model_implementations/bmretriever_models.py +10 -4
  119. mteb/models/model_implementations/cadet_models.py +10 -1
  120. mteb/models/model_implementations/cde_models.py +25 -4
  121. mteb/models/model_implementations/clip_models.py +9 -6
  122. mteb/models/model_implementations/clips_models.py +100 -0
  123. mteb/models/model_implementations/codefuse_models.py +165 -3
  124. mteb/models/model_implementations/codesage_models.py +18 -3
  125. mteb/models/model_implementations/cohere_models.py +13 -6
  126. mteb/models/model_implementations/cohere_v.py +7 -2
  127. mteb/models/model_implementations/colpali_models.py +17 -9
  128. mteb/models/model_implementations/colqwen_models.py +275 -5
  129. mteb/models/model_implementations/colsmol_models.py +4 -2
  130. mteb/models/model_implementations/conan_models.py +2 -1
  131. mteb/models/model_implementations/dino_models.py +194 -23
  132. mteb/models/model_implementations/e5_instruct.py +27 -4
  133. mteb/models/model_implementations/e5_models.py +21 -110
  134. mteb/models/model_implementations/e5_v.py +7 -6
  135. mteb/models/model_implementations/eagerworks_models.py +164 -0
  136. mteb/models/model_implementations/emillykkejensen_models.py +91 -0
  137. mteb/models/model_implementations/en_code_retriever.py +2 -1
  138. mteb/models/model_implementations/euler_models.py +32 -0
  139. mteb/models/model_implementations/evaclip_models.py +4 -0
  140. mteb/models/model_implementations/fa_models.py +67 -9
  141. mteb/models/model_implementations/facebookai.py +205 -0
  142. mteb/models/model_implementations/geogpt_models.py +2 -1
  143. mteb/models/model_implementations/gme_v_models.py +17 -10
  144. mteb/models/model_implementations/google_models.py +17 -6
  145. mteb/models/model_implementations/granite_vision_embedding_models.py +8 -3
  146. mteb/models/model_implementations/gritlm_models.py +4 -2
  147. mteb/models/model_implementations/gte_models.py +99 -9
  148. mteb/models/model_implementations/hinvec_models.py +2 -1
  149. mteb/models/model_implementations/human.py +1 -0
  150. mteb/models/model_implementations/ibm_granite_models.py +36 -6
  151. mteb/models/model_implementations/inf_models.py +4 -2
  152. mteb/models/model_implementations/jasper_models.py +256 -3
  153. mteb/models/model_implementations/jina_clip.py +49 -10
  154. mteb/models/model_implementations/jina_models.py +222 -11
  155. mteb/models/model_implementations/kalm_models.py +203 -25
  156. mteb/models/model_implementations/kblab.py +37 -0
  157. mteb/models/model_implementations/kennethenevoldsen_models.py +74 -0
  158. mteb/models/model_implementations/kfst.py +25 -0
  159. mteb/models/model_implementations/kowshik24_models.py +32 -0
  160. mteb/models/model_implementations/lens_models.py +2 -0
  161. mteb/models/model_implementations/lgai_embedding_models.py +2 -1
  162. mteb/models/model_implementations/linq_models.py +4 -3
  163. mteb/models/model_implementations/listconranker.py +2 -2
  164. mteb/models/model_implementations/llm2clip_models.py +9 -6
  165. mteb/models/model_implementations/llm2vec_models.py +16 -8
  166. mteb/models/model_implementations/mcinext_models.py +7 -1
  167. mteb/models/model_implementations/mdbr_models.py +19 -3
  168. mteb/models/model_implementations/misc_models.py +422 -60
  169. mteb/models/model_implementations/mixedbread_ai_models.py +332 -0
  170. mteb/models/model_implementations/mme5_models.py +2 -1
  171. mteb/models/model_implementations/moco_models.py +15 -4
  172. mteb/models/model_implementations/mod_models.py +191 -0
  173. mteb/models/model_implementations/model2vec_models.py +27 -14
  174. mteb/models/model_implementations/moka_models.py +4 -1
  175. mteb/models/model_implementations/nbailab.py +70 -0
  176. mteb/models/model_implementations/no_instruct_sentence_models.py +3 -2
  177. mteb/models/model_implementations/nomic_models.py +173 -6
  178. mteb/models/model_implementations/nomic_models_vision.py +8 -3
  179. mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py +32 -19
  180. mteb/models/model_implementations/nvidia_models.py +155 -20
  181. mteb/models/model_implementations/octen_models.py +254 -0
  182. mteb/models/model_implementations/openai_models.py +20 -16
  183. mteb/models/model_implementations/openclip_models.py +37 -13
  184. mteb/models/model_implementations/opensearch_neural_sparse_models.py +10 -5
  185. mteb/models/model_implementations/ops_moa_models.py +5 -3
  186. mteb/models/model_implementations/ordalietech_solon_embeddings_mini_beta_1_1.py +1 -1
  187. mteb/models/model_implementations/pawan_models.py +39 -0
  188. mteb/models/model_implementations/piccolo_models.py +9 -1
  189. mteb/models/model_implementations/pixie_models.py +56 -0
  190. mteb/models/model_implementations/promptriever_models.py +12 -8
  191. mteb/models/model_implementations/pylate_models.py +46 -12
  192. mteb/models/model_implementations/qodo_models.py +4 -2
  193. mteb/models/model_implementations/qtack_models.py +2 -1
  194. mteb/models/model_implementations/qwen3_models.py +9 -6
  195. mteb/models/model_implementations/qzhou_models.py +5 -3
  196. mteb/models/model_implementations/random_baseline.py +19 -24
  197. mteb/models/model_implementations/rasgaard_models.py +34 -0
  198. mteb/models/model_implementations/reasonir_model.py +2 -1
  199. mteb/models/model_implementations/repllama_models.py +5 -3
  200. mteb/models/model_implementations/rerankers_custom.py +15 -9
  201. mteb/models/model_implementations/rerankers_monot5_based.py +31 -31
  202. mteb/models/model_implementations/richinfoai_models.py +2 -1
  203. mteb/models/model_implementations/ru_sentence_models.py +71 -20
  204. mteb/models/model_implementations/ruri_models.py +322 -0
  205. mteb/models/model_implementations/salesforce_models.py +6 -3
  206. mteb/models/model_implementations/samilpwc_models.py +2 -1
  207. mteb/models/model_implementations/sarashina_embedding_models.py +168 -0
  208. mteb/models/model_implementations/searchmap_models.py +2 -1
  209. mteb/models/model_implementations/seed_1_6_embedding_models.py +8 -2
  210. mteb/models/model_implementations/seed_1_6_embedding_models_1215.py +625 -0
  211. mteb/models/model_implementations/seed_models.py +1 -0
  212. mteb/models/model_implementations/sentence_transformers_models.py +177 -18
  213. mteb/models/model_implementations/shuu_model.py +32 -31
  214. mteb/models/model_implementations/siglip_models.py +30 -20
  215. mteb/models/model_implementations/slm_models.py +416 -0
  216. mteb/models/model_implementations/sonar_models.py +1 -0
  217. mteb/models/model_implementations/spartan8806_atles_champion.py +34 -0
  218. mteb/models/model_implementations/stella_models.py +23 -4
  219. mteb/models/model_implementations/tarka_models.py +376 -0
  220. mteb/models/model_implementations/text2vec_models.py +9 -3
  221. mteb/models/model_implementations/ua_sentence_models.py +11 -1
  222. mteb/models/model_implementations/uae_models.py +8 -1
  223. mteb/models/model_implementations/vdr_models.py +3 -1
  224. mteb/models/model_implementations/vi_vn_models.py +45 -6
  225. mteb/models/model_implementations/vista_models.py +2 -0
  226. mteb/models/model_implementations/vlm2vec_models.py +5 -3
  227. mteb/models/model_implementations/voyage_models.py +99 -0
  228. mteb/models/model_implementations/voyage_v.py +17 -9
  229. mteb/models/model_implementations/xyz_models.py +1 -0
  230. mteb/models/model_implementations/youtu_models.py +2 -1
  231. mteb/models/model_implementations/yuan_models.py +34 -0
  232. mteb/models/model_implementations/yuan_models_en.py +58 -0
  233. mteb/models/model_meta.py +498 -29
  234. mteb/models/models_protocols.py +22 -6
  235. mteb/models/search_encoder_index/__init__.py +7 -0
  236. mteb/models/search_encoder_index/search_backend_protocol.py +50 -0
  237. mteb/models/search_encoder_index/search_indexes/__init__.py +5 -0
  238. mteb/models/search_encoder_index/search_indexes/faiss_search_index.py +160 -0
  239. mteb/models/search_wrappers.py +197 -65
  240. mteb/models/sentence_transformer_wrapper.py +52 -32
  241. mteb/models/vllm_wrapper.py +327 -0
  242. mteb/py.typed +0 -0
  243. mteb/results/benchmark_results.py +114 -65
  244. mteb/results/model_result.py +63 -26
  245. mteb/results/task_result.py +117 -77
  246. mteb/similarity_functions.py +60 -7
  247. mteb/tasks/bitext_mining/multilingual/__init__.py +2 -1
  248. mteb/tasks/bitext_mining/multilingual/bucc_bitext_mining.py +4 -2
  249. mteb/tasks/bitext_mining/multilingual/bucc_bitext_mining_fast.py +1 -1
  250. mteb/tasks/bitext_mining/multilingual/ru_sci_bench_bitext_mining.py +47 -5
  251. mteb/tasks/bitext_mining/multilingual/web_faq_bitext_mining.py +2 -6
  252. mteb/tasks/classification/ara/ajgt.py +1 -2
  253. mteb/tasks/classification/ara/hotel_review_sentiment_classification.py +1 -2
  254. mteb/tasks/classification/ara/online_store_review_sentiment_classification.py +1 -2
  255. mteb/tasks/classification/ara/restaurant_review_sentiment_classification.py +1 -2
  256. mteb/tasks/classification/ara/tweet_emotion_classification.py +1 -2
  257. mteb/tasks/classification/ara/tweet_sarcasm_classification.py +1 -2
  258. mteb/tasks/classification/ben/bengali_document_classification.py +1 -2
  259. mteb/tasks/classification/ben/bengali_hate_speech_classification.py +1 -2
  260. mteb/tasks/classification/ben/bengali_sentiment_analysis.py +1 -2
  261. mteb/tasks/classification/ces/csfdcz_movie_review_sentiment_classification.py +1 -2
  262. mteb/tasks/classification/ces/czech_product_review_sentiment_classification.py +1 -2
  263. mteb/tasks/classification/ces/czech_so_me_sentiment_classification.py +1 -2
  264. mteb/tasks/classification/dan/angry_tweets_classification.py +1 -2
  265. mteb/tasks/classification/dan/danish_political_comments_classification.py +1 -2
  266. mteb/tasks/classification/dan/ddisco_cohesion_classification.py +1 -2
  267. mteb/tasks/classification/dan/dk_hate_classification.py +2 -3
  268. mteb/tasks/classification/deu/german_politicians_twitter_sentiment_classification.py +1 -2
  269. mteb/tasks/classification/deu/ten_k_gnad_classification.py +1 -2
  270. mteb/tasks/classification/eng/amazon_polarity_classification.py +1 -2
  271. mteb/tasks/classification/eng/arxiv_classification.py +1 -2
  272. mteb/tasks/classification/eng/banking77_classification.py +1 -2
  273. mteb/tasks/classification/eng/dbpedia_classification.py +1 -2
  274. mteb/tasks/classification/eng/emotion_classification.py +1 -2
  275. mteb/tasks/classification/eng/financial_phrasebank_classification.py +1 -2
  276. mteb/tasks/classification/eng/frenk_en_classification.py +1 -2
  277. mteb/tasks/classification/eng/gtsrb_classification.py +1 -1
  278. mteb/tasks/classification/eng/imdb_classification.py +1 -2
  279. mteb/tasks/classification/eng/legal_bench_classification.py +14 -120
  280. mteb/tasks/classification/eng/news_classification.py +1 -2
  281. mteb/tasks/classification/eng/patch_camelyon_classification.py +1 -1
  282. mteb/tasks/classification/eng/patent_classification.py +1 -2
  283. mteb/tasks/classification/eng/poem_sentiment_classification.py +1 -2
  284. mteb/tasks/classification/eng/sds_eye_protection_classification.py +1 -2
  285. mteb/tasks/classification/eng/sds_gloves_classification.py +1 -2
  286. mteb/tasks/classification/eng/toxic_chat_classification.py +2 -19
  287. mteb/tasks/classification/eng/toxic_conversations_classification.py +1 -2
  288. mteb/tasks/classification/eng/tweet_sentiment_extraction_classification.py +1 -2
  289. mteb/tasks/classification/eng/tweet_topic_single_classification.py +2 -13
  290. mteb/tasks/classification/eng/ucf101_classification.py +1 -5
  291. mteb/tasks/classification/eng/wikipedia_bio_met_chem_classification.py +1 -2
  292. mteb/tasks/classification/eng/wikipedia_chem_fields_classification.py +1 -2
  293. mteb/tasks/classification/eng/wikipedia_comp_chem_spectroscopy_classification.py +1 -2
  294. mteb/tasks/classification/eng/wikipedia_crystallography_analytical_classification.py +1 -2
  295. mteb/tasks/classification/eng/wikipedia_theoretical_applied_classification.py +1 -2
  296. mteb/tasks/classification/eng/yahoo_answers_topics_classification.py +1 -2
  297. mteb/tasks/classification/eng/yelp_review_full_classification.py +1 -2
  298. mteb/tasks/classification/est/estonian_valence.py +2 -3
  299. mteb/tasks/classification/fas/fa_mteb_classification.py +7 -14
  300. mteb/tasks/classification/fil/filipino_hate_speech_classification.py +1 -2
  301. mteb/tasks/classification/fin/fin_toxicity_classification.py +2 -11
  302. mteb/tasks/classification/fra/french_book_reviews.py +1 -2
  303. mteb/tasks/classification/fra/movie_review_sentiment_classification.py +1 -2
  304. mteb/tasks/classification/guj/gujarati_news_classification.py +1 -2
  305. mteb/tasks/classification/heb/__init__.py +6 -1
  306. mteb/tasks/classification/heb/hebrew_sentiment_analysis.py +62 -4
  307. mteb/tasks/classification/hin/hindi_discourse_classification.py +1 -2
  308. mteb/tasks/classification/hin/sentiment_analysis_hindi.py +1 -2
  309. mteb/tasks/classification/hrv/frenk_hr_classification.py +1 -2
  310. mteb/tasks/classification/ind/indonesian_id_clickbait_classification.py +1 -2
  311. mteb/tasks/classification/ind/indonesian_mongabay_conservation_classification.py +1 -2
  312. mteb/tasks/classification/ita/italian_linguist_acceptability_classification.py +1 -2
  313. mteb/tasks/classification/jav/javanese_imdb_classification.py +1 -2
  314. mteb/tasks/classification/jpn/wrime_classification.py +1 -2
  315. mteb/tasks/classification/kan/kannada_news_classification.py +1 -2
  316. mteb/tasks/classification/kor/klue_tc.py +1 -2
  317. mteb/tasks/classification/kor/kor_hate_classification.py +2 -17
  318. mteb/tasks/classification/kor/kor_sarcasm_classification.py +2 -19
  319. mteb/tasks/classification/kur/kurdish_sentiment_classification.py +3 -4
  320. mteb/tasks/classification/mal/malayalam_news_classification.py +1 -2
  321. mteb/tasks/classification/mar/marathi_news_classification.py +1 -2
  322. mteb/tasks/classification/mkd/macedonian_tweet_sentiment_classification.py +1 -2
  323. mteb/tasks/classification/multilingual/catalonia_tweet_classification.py +1 -6
  324. mteb/tasks/classification/multilingual/multi_hate_classification.py +1 -4
  325. mteb/tasks/classification/multilingual/ru_sci_bench_classification.py +4 -23
  326. mteb/tasks/classification/multilingual/scala_classification.py +2 -3
  327. mteb/tasks/classification/multilingual/sib200_classification.py +1 -6
  328. mteb/tasks/classification/mya/myanmar_news.py +1 -2
  329. mteb/tasks/classification/nep/nepali_news_classification.py +1 -2
  330. mteb/tasks/classification/nld/dutch_book_review_sentiment_classification.py +4 -2
  331. mteb/tasks/classification/nld/dutch_cola_classification.py +3 -0
  332. mteb/tasks/classification/nld/dutch_government_bias_classification.py +3 -0
  333. mteb/tasks/classification/nld/dutch_news_articles_classification.py +3 -0
  334. mteb/tasks/classification/nld/dutch_sarcastic_headlines_classification.py +3 -0
  335. mteb/tasks/classification/nld/iconclass_classification.py +3 -0
  336. mteb/tasks/classification/nld/open_tender_classification.py +3 -0
  337. mteb/tasks/classification/nld/vaccin_chat_nl_classification.py +3 -0
  338. mteb/tasks/classification/nob/no_rec_classification.py +1 -2
  339. mteb/tasks/classification/nob/norwegian_parliament_classification.py +1 -2
  340. mteb/tasks/classification/ory/odia_news_classification.py +1 -2
  341. mteb/tasks/classification/pol/polish_classification.py +3 -6
  342. mteb/tasks/classification/ron/moroco.py +1 -2
  343. mteb/tasks/classification/ron/romanian_reviews_sentiment.py +1 -2
  344. mteb/tasks/classification/ron/romanian_sentiment_classification.py +1 -2
  345. mteb/tasks/classification/rus/georeview_classification.py +1 -2
  346. mteb/tasks/classification/rus/headline_classification.py +1 -2
  347. mteb/tasks/classification/rus/inappropriateness_classification.py +1 -2
  348. mteb/tasks/classification/rus/ru_reviews_classification.py +1 -2
  349. mteb/tasks/classification/rus/ru_toixic_classification_okmlcup.py +1 -2
  350. mteb/tasks/classification/rus/senti_ru_eval.py +1 -2
  351. mteb/tasks/classification/sin/sinhala_news_classification.py +1 -2
  352. mteb/tasks/classification/sin/sinhala_news_source_classification.py +1 -2
  353. mteb/tasks/classification/slk/csfdsk_movie_review_sentiment_classification.py +1 -2
  354. mteb/tasks/classification/slk/slovak_hate_speech_classification.py +1 -2
  355. mteb/tasks/classification/slk/slovak_movie_review_sentiment_classification.py +1 -2
  356. mteb/tasks/classification/slv/frenk_sl_classification.py +1 -2
  357. mteb/tasks/classification/spa/spanish_news_classification.py +1 -2
  358. mteb/tasks/classification/spa/spanish_sentiment_classification.py +1 -2
  359. mteb/tasks/classification/ssw/siswati_news_classification.py +1 -2
  360. mteb/tasks/classification/swa/swahili_news_classification.py +1 -2
  361. mteb/tasks/classification/swe/dalaj_classification.py +1 -2
  362. mteb/tasks/classification/swe/swe_rec_classification.py +1 -2
  363. mteb/tasks/classification/swe/swedish_sentiment_classification.py +1 -2
  364. mteb/tasks/classification/tam/tamil_news_classification.py +1 -2
  365. mteb/tasks/classification/tel/telugu_andhra_jyoti_news_classification.py +1 -2
  366. mteb/tasks/classification/tha/wisesight_sentiment_classification.py +1 -2
  367. mteb/tasks/classification/tsn/tswana_news_classification.py +1 -2
  368. mteb/tasks/classification/tur/__init__.py +4 -0
  369. mteb/tasks/classification/tur/turkish_constitutional_court.py +41 -0
  370. mteb/tasks/classification/tur/turkish_movie_sentiment_classification.py +1 -2
  371. mteb/tasks/classification/tur/turkish_product_sentiment_classification.py +1 -2
  372. mteb/tasks/classification/ukr/ukr_formality_classification.py +2 -15
  373. mteb/tasks/classification/urd/urdu_roman_sentiment_classification.py +1 -2
  374. mteb/tasks/classification/vie/amazon_counterfactual_vn_classification.py +1 -6
  375. mteb/tasks/classification/vie/amazon_polarity_vn_classification.py +1 -6
  376. mteb/tasks/classification/vie/amazon_reviews_vn_classification.py +1 -5
  377. mteb/tasks/classification/vie/banking77_vn_classification.py +1 -5
  378. mteb/tasks/classification/vie/emotion_vn_classification.py +1 -5
  379. mteb/tasks/classification/vie/imdb_vn_classification.py +1 -5
  380. mteb/tasks/classification/vie/massive_intent_vn_classification.py +1 -5
  381. mteb/tasks/classification/vie/massive_scenario_vn_classification.py +1 -5
  382. mteb/tasks/classification/vie/mtop_domain_vn_classification.py +1 -5
  383. mteb/tasks/classification/vie/mtop_intent_vn_classification.py +1 -5
  384. mteb/tasks/classification/vie/toxic_conversations_vn_classification.py +1 -5
  385. mteb/tasks/classification/vie/tweet_sentiment_extraction_vn_classification.py +1 -5
  386. mteb/tasks/classification/vie/vie_student_feedback_classification.py +1 -2
  387. mteb/tasks/classification/zho/cmteb_classification.py +5 -10
  388. mteb/tasks/classification/zho/yue_openrice_review_classification.py +1 -2
  389. mteb/tasks/classification/zul/isi_zulu_news_classification.py +1 -2
  390. mteb/tasks/clustering/eng/hume_wiki_cities_clustering.py +1 -1
  391. mteb/tasks/clustering/eng/wiki_cities_clustering.py +1 -1
  392. mteb/tasks/clustering/jpn/mews_c16_ja_clustering.py +1 -3
  393. mteb/tasks/clustering/multilingual/sib200_clustering_s2s.py +1 -6
  394. mteb/tasks/clustering/nld/dutch_news_articles_clustering_p2p.py +3 -0
  395. mteb/tasks/clustering/nld/dutch_news_articles_clustering_s2s.py +3 -0
  396. mteb/tasks/clustering/nld/iconclass_clustering_s2s.py +3 -0
  397. mteb/tasks/clustering/nld/open_tender_clustering_p2p.py +3 -0
  398. mteb/tasks/clustering/nld/open_tender_clustering_s2s.py +3 -0
  399. mteb/tasks/clustering/nld/vabb_clustering_p2p.py +3 -0
  400. mteb/tasks/clustering/nld/vabb_clustering_s2s.py +3 -0
  401. mteb/tasks/clustering/vie/reddit_clustering_p2p_vn.py +1 -5
  402. mteb/tasks/clustering/vie/reddit_clustering_vn.py +1 -5
  403. mteb/tasks/clustering/vie/stack_exchange_clustering_p2p_vn.py +1 -5
  404. mteb/tasks/clustering/vie/stack_exchange_clustering_vn.py +1 -5
  405. mteb/tasks/clustering/vie/twenty_newsgroups_clustering_vn.py +1 -5
  406. mteb/tasks/clustering/zho/cmteb_clustering.py +2 -2
  407. mteb/tasks/image_text_pair_classification/eng/sugar_crepe.py +1 -1
  408. mteb/tasks/multilabel_classification/ita/emit_classification.py +1 -5
  409. mteb/tasks/multilabel_classification/kor/kor_hate_speech_ml_classification.py +1 -9
  410. mteb/tasks/multilabel_classification/mlt/maltese_news_classification.py +1 -6
  411. mteb/tasks/multilabel_classification/nld/covid_disinformation_nl_multi_label_classification.py +3 -0
  412. mteb/tasks/multilabel_classification/nld/vabb_multi_label_classification.py +3 -0
  413. mteb/tasks/multilabel_classification/por/brazilian_toxic_tweets_classification.py +1 -6
  414. mteb/tasks/multilabel_classification/swe/swedish_patent_cpc_group_classification.py +1 -1
  415. mteb/tasks/multilabel_classification/swe/swedish_patent_cpc_subclass_classification.py +1 -2
  416. mteb/tasks/pair_classification/dan/talemaader_pc.py +1 -6
  417. mteb/tasks/pair_classification/eng/legal_bench_pc.py +1 -9
  418. mteb/tasks/pair_classification/nld/sick_nl_pair_classification.py +3 -0
  419. mteb/tasks/pair_classification/nld/xlwic_nl_pair_classification.py +3 -0
  420. mteb/tasks/pair_classification/rus/__init__.py +2 -2
  421. mteb/tasks/pair_classification/rus/terra.py +51 -25
  422. mteb/tasks/pair_classification/vie/sprint_duplicate_questions_pcvn.py +1 -5
  423. mteb/tasks/pair_classification/vie/twitter_sem_eval2015_pcvn.py +1 -5
  424. mteb/tasks/pair_classification/vie/twitter_url_corpus_pcvn.py +1 -5
  425. mteb/tasks/regression/multilingual/ru_sci_bench_regression.py +2 -6
  426. mteb/tasks/reranking/jpn/__init__.py +9 -1
  427. mteb/tasks/reranking/jpn/j_qa_ra_reranking_lite.py +49 -0
  428. mteb/tasks/reranking/jpn/ja_cwir_reranking_lite.py +47 -0
  429. mteb/tasks/reranking/multilingual/__init__.py +2 -0
  430. mteb/tasks/reranking/multilingual/multi_long_doc_reranking.py +70 -0
  431. mteb/tasks/reranking/multilingual/wikipedia_reranking_multilingual.py +1 -1
  432. mteb/tasks/reranking/multilingual/x_glue_wpr_reranking.py +1 -2
  433. mteb/tasks/reranking/vie/ask_ubuntu_dup_questions_vn.py +1 -5
  434. mteb/tasks/reranking/vie/sci_docs_reranking_vn.py +1 -5
  435. mteb/tasks/reranking/vie/stack_overflow_dup_questions_vn.py +1 -5
  436. mteb/tasks/retrieval/code/code_rag.py +12 -12
  437. mteb/tasks/retrieval/code/fresh_stack_retrieval.py +8 -5
  438. mteb/tasks/retrieval/dan/dan_fever_retrieval.py +1 -1
  439. mteb/tasks/retrieval/dan/tv2_nordretrieval.py +2 -2
  440. mteb/tasks/retrieval/dan/twitter_hjerne_retrieval.py +2 -2
  441. mteb/tasks/retrieval/eng/__init__.py +2 -0
  442. mteb/tasks/retrieval/eng/chemrxiv.py +33 -0
  443. mteb/tasks/retrieval/eng/cub200_i2i_retrieval.py +1 -1
  444. mteb/tasks/retrieval/eng/lit_search_retrieval.py +1 -8
  445. mteb/tasks/retrieval/eng/vidore_bench_retrieval.py +4 -0
  446. mteb/tasks/retrieval/jpn/__init__.py +8 -0
  447. mteb/tasks/retrieval/jpn/ja_cwir_retrieval.py +1 -4
  448. mteb/tasks/retrieval/jpn/ja_cwir_retrieval_lite.py +47 -0
  449. mteb/tasks/retrieval/jpn/jaqket_retrieval_lite.py +50 -0
  450. mteb/tasks/retrieval/jpn/miracl_ja_retrieval_lite.py +52 -0
  451. mteb/tasks/retrieval/jpn/mr_tydi_ja_retrieval_lite.py +48 -0
  452. mteb/tasks/retrieval/kat/georgian_faq_retrieval.py +11 -4
  453. mteb/tasks/retrieval/kor/__init__.py +16 -1
  454. mteb/tasks/retrieval/kor/kovidore2_bench_retrieval.py +142 -0
  455. mteb/tasks/retrieval/kor/squad_kor_v1_retrieval.py +47 -0
  456. mteb/tasks/retrieval/multilingual/__init__.py +24 -0
  457. mteb/tasks/retrieval/multilingual/belebele_retrieval.py +5 -4
  458. mteb/tasks/retrieval/multilingual/euro_pirq_retrieval.py +43 -0
  459. mteb/tasks/retrieval/multilingual/jina_vdr_bench_retrieval.py +56 -42
  460. mteb/tasks/retrieval/multilingual/mkqa_retrieval.py +1 -2
  461. mteb/tasks/retrieval/multilingual/mlqa_retrieval.py +1 -4
  462. mteb/tasks/retrieval/multilingual/multi_long_doc_retrieval.py +1 -2
  463. mteb/tasks/retrieval/multilingual/public_health_qa_retrieval.py +9 -4
  464. mteb/tasks/retrieval/multilingual/ru_sci_bench_retrieval.py +2 -12
  465. mteb/tasks/retrieval/multilingual/vidore2_bench_retrieval.py +4 -2
  466. mteb/tasks/retrieval/multilingual/vidore3_bench_retrieval.py +389 -0
  467. mteb/tasks/retrieval/nld/__init__.py +8 -4
  468. mteb/tasks/retrieval/nld/argu_ana_nl_retrieval.py +46 -27
  469. mteb/tasks/retrieval/nld/bbsard_nl_retrieval.py +3 -0
  470. mteb/tasks/retrieval/nld/dutch_news_articles_retrieval.py +3 -0
  471. mteb/tasks/retrieval/nld/legal_qa_nl_retrieval.py +3 -0
  472. mteb/tasks/retrieval/nld/nf_corpus_nl_retrieval.py +42 -25
  473. mteb/tasks/retrieval/nld/open_tender_retrieval.py +3 -0
  474. mteb/tasks/retrieval/nld/sci_fact_nl_retrieval.py +42 -24
  475. mteb/tasks/retrieval/nld/scidocsnl_retrieval.py +44 -27
  476. mteb/tasks/retrieval/nld/vabb_retrieval.py +3 -0
  477. mteb/tasks/retrieval/nob/norquad.py +2 -2
  478. mteb/tasks/retrieval/nob/snl_retrieval.py +2 -2
  479. mteb/tasks/retrieval/slk/slovak_sum_retrieval.py +1 -7
  480. mteb/tasks/retrieval/tur/tur_hist_quad.py +1 -1
  481. mteb/tasks/retrieval/vie/__init__.py +14 -6
  482. mteb/tasks/retrieval/vie/argu_ana_vn_retrieval.py +1 -5
  483. mteb/tasks/retrieval/vie/climate_fevervn_retrieval.py +40 -5
  484. mteb/tasks/retrieval/vie/cqa_dupstack_android_vn_retrieval.py +1 -5
  485. mteb/tasks/retrieval/vie/cqa_dupstack_gis_vn_retrieval.py +1 -5
  486. mteb/tasks/retrieval/vie/cqa_dupstack_mathematica_vn_retrieval.py +1 -5
  487. mteb/tasks/retrieval/vie/cqa_dupstack_physics_vn_retrieval.py +1 -5
  488. mteb/tasks/retrieval/vie/cqa_dupstack_programmers_vn_retrieval.py +1 -5
  489. mteb/tasks/retrieval/vie/cqa_dupstack_stats_vn_retrieval.py +1 -5
  490. mteb/tasks/retrieval/vie/cqa_dupstack_tex_vn_retrieval.py +1 -5
  491. mteb/tasks/retrieval/vie/cqa_dupstack_unix_vn_retrieval.py +1 -5
  492. mteb/tasks/retrieval/vie/cqa_dupstack_webmasters_vn_retrieval.py +1 -5
  493. mteb/tasks/retrieval/vie/cqa_dupstack_wordpress_vn_retrieval.py +1 -5
  494. mteb/tasks/retrieval/vie/db_pedia_vn_retrieval.py +40 -5
  495. mteb/tasks/retrieval/vie/fevervn_retrieval.py +40 -7
  496. mteb/tasks/retrieval/vie/fi_qa2018_vn_retrieval.py +1 -5
  497. mteb/tasks/retrieval/vie/green_node_table_markdown_retrieval.py +16 -1
  498. mteb/tasks/retrieval/vie/hotpot_qavn_retrieval.py +40 -6
  499. mteb/tasks/retrieval/vie/msmarcovn_retrieval.py +49 -5
  500. mteb/tasks/retrieval/vie/nf_corpus_vn_retrieval.py +1 -5
  501. mteb/tasks/retrieval/vie/nqvn_retrieval.py +40 -5
  502. mteb/tasks/retrieval/vie/quora_vn_retrieval.py +1 -6
  503. mteb/tasks/retrieval/vie/sci_fact_vn_retrieval.py +1 -5
  504. mteb/tasks/retrieval/vie/scidocsvn_retrieval.py +1 -6
  505. mteb/tasks/retrieval/vie/touche2020_vn_retrieval.py +1 -5
  506. mteb/tasks/retrieval/vie/treccovidvn_retrieval.py +1 -5
  507. mteb/tasks/retrieval/vie/tvpl_retrieval.py +42 -0
  508. mteb/tasks/retrieval/vie/zac_legal_text_retrieval.py +15 -1
  509. mteb/tasks/sts/nld/sick_nl_sts.py +1 -0
  510. mteb/tasks/sts/vie/biosses_stsvn.py +1 -5
  511. mteb/tasks/sts/vie/sickr_stsvn.py +1 -5
  512. mteb/tasks/sts/vie/sts_benchmark_stsvn.py +1 -5
  513. mteb/tasks/zeroshot_classification/eng/gtsrb.py +1 -1
  514. mteb/tasks/zeroshot_classification/eng/patch_camelyon.py +1 -1
  515. mteb/tasks/zeroshot_classification/eng/ucf101.py +1 -5
  516. mteb/types/__init__.py +2 -0
  517. mteb/types/_encoder_io.py +19 -2
  518. mteb/types/_result.py +2 -1
  519. mteb/types/statistics.py +9 -3
  520. {mteb-2.1.4.dist-info → mteb-2.7.2.dist-info}/METADATA +25 -8
  521. {mteb-2.1.4.dist-info → mteb-2.7.2.dist-info}/RECORD +525 -438
  522. mteb/models/model_implementations/mxbai_models.py +0 -102
  523. mteb/models/model_implementations/nb_sbert.py +0 -25
  524. {mteb-2.1.4.dist-info → mteb-2.7.2.dist-info}/WHEEL +0 -0
  525. {mteb-2.1.4.dist-info → mteb-2.7.2.dist-info}/entry_points.txt +0 -0
  526. {mteb-2.1.4.dist-info → mteb-2.7.2.dist-info}/licenses/LICENSE +0 -0
  527. {mteb-2.1.4.dist-info → mteb-2.7.2.dist-info}/top_level.txt +0 -0
@@ -465,8 +465,7 @@ class SynPerTextToneClassification(AbsTaskClassification):
465
465
  class SynPerTextToneClassificationV2(AbsTaskClassification):
466
466
  metadata = TaskMetadata(
467
467
  name="SynPerTextToneClassification.v2",
468
- description="""Persian Text Tone
469
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
468
+ description="Persian Text Tone This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
470
469
  reference="https://mcinext.com/",
471
470
  dataset={
472
471
  "path": "mteb/syn_per_text_tone",
@@ -495,8 +494,7 @@ class SynPerTextToneClassificationV2(AbsTaskClassification):
495
494
  class SynPerTextToneClassificationV3(AbsTaskClassification):
496
495
  metadata = TaskMetadata(
497
496
  name="SynPerTextToneClassification.v3",
498
- description="""This version of the Persian text tone classification dataset is an improved version of its predecessors.
499
- It excludes several classes identified as having low-quality data, leading to a more reliable benchmark.""",
497
+ description="This version of the Persian text tone classification dataset is an improved version of its predecessors. It excludes several classes identified as having low-quality data, leading to a more reliable benchmark.",
500
498
  reference="https://mcinext.com/",
501
499
  dataset={
502
500
  "path": "MCINext/synthetic-persian-text-tone-classification-v3",
@@ -552,8 +550,7 @@ class SIDClassification(AbsTaskClassification):
552
550
  class SIDClassificationV2(AbsTaskClassification):
553
551
  metadata = TaskMetadata(
554
552
  name="SIDClassification.v2",
555
- description="""SID Classification
556
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
553
+ description="SID Classification This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
557
554
  reference="https://mcinext.com/",
558
555
  dataset={
559
556
  "path": "mteb/sid",
@@ -612,8 +609,7 @@ class DeepSentiPers(AbsTaskClassification):
612
609
  class DeepSentiPersV2(AbsTaskClassification):
613
610
  metadata = TaskMetadata(
614
611
  name="DeepSentiPers.v2",
615
- description="""Persian Sentiment Analysis Dataset
616
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
612
+ description="Persian Sentiment Analysis Dataset This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
617
613
  reference="https://github.com/JoyeBright/DeepSentiPers",
618
614
  dataset={
619
615
  "path": "mteb/deep_senti_pers",
@@ -669,8 +665,7 @@ class PersianTextEmotion(AbsTaskClassification):
669
665
  class PersianTextEmotionV2(AbsTaskClassification):
670
666
  metadata = TaskMetadata(
671
667
  name="PersianTextEmotion.v2",
672
- description="""Emotion is a Persian dataset with six basic emotions: anger, fear, joy, love, sadness, and surprise.
673
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
668
+ description="Emotion is a Persian dataset with six basic emotions: anger, fear, joy, love, sadness, and surprise. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
674
669
  reference="https://huggingface.co/datasets/SeyedAli/Persian-Text-Emotion",
675
670
  dataset={
676
671
  "path": "mteb/persian_text_emotion",
@@ -726,8 +721,7 @@ class SentimentDKSF(AbsTaskClassification):
726
721
  class SentimentDKSFV2(AbsTaskClassification):
727
722
  metadata = TaskMetadata(
728
723
  name="SentimentDKSF.v2",
729
- description="""The Sentiment DKSF (Digikala/Snappfood comments) is a dataset for sentiment analysis.
730
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
724
+ description="The Sentiment DKSF (Digikala/Snappfood comments) is a dataset for sentiment analysis. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
731
725
  reference="https://github.com/hezarai/hezar",
732
726
  dataset={
733
727
  "path": "mteb/sentiment_dksf",
@@ -786,8 +780,7 @@ class NLPTwitterAnalysisClassification(AbsTaskClassification):
786
780
  class NLPTwitterAnalysisClassificationV2(AbsTaskClassification):
787
781
  metadata = TaskMetadata(
788
782
  name="NLPTwitterAnalysisClassification.v2",
789
- description="""Twitter Analysis Classification
790
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
783
+ description="Twitter Analysis Classification This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
791
784
  reference="https://huggingface.co/datasets/hamedhf/nlp_twitter_analysis/tree/main",
792
785
  dataset={
793
786
  "path": "mteb/nlp_twitter_analysis",
@@ -44,8 +44,7 @@ class FilipinoHateSpeechClassification(AbsTaskClassification):
44
44
  class FilipinoHateSpeechClassificationV2(AbsTaskClassification):
45
45
  metadata = TaskMetadata(
46
46
  name="FilipinoHateSpeechClassification.v2",
47
- description="""Filipino Twitter dataset for sentiment classification.
48
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
47
+ description="Filipino Twitter dataset for sentiment classification. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
49
48
  reference="https://pcj.csp.org.ph/index.php/pcj/issue/download/29/PCJ%20V14%20N1%20pp1-14%202019",
50
49
  dataset={
51
50
  "path": "mteb/filipino_hate_speech",
@@ -5,11 +5,7 @@ from mteb.abstasks.task_metadata import TaskMetadata
5
5
  class FinToxicityClassification(AbsTaskClassification):
6
6
  metadata = TaskMetadata(
7
7
  name="FinToxicityClassification",
8
- description="""
9
- This dataset is a DeepL -based machine translated version of the Jigsaw toxicity dataset for Finnish. The dataset is originally from a Kaggle competition https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data.
10
- The original dataset poses a multi-label text classification problem and includes the labels identity_attack, insult, obscene, severe_toxicity, threat and toxicity.
11
- Here adapted for toxicity classification, which is the most represented class.
12
- """,
8
+ description="This dataset is a DeepL -based machine translated version of the Jigsaw toxicity dataset for Finnish. The dataset is originally from a Kaggle competition https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data. The original dataset poses a multi-label text classification problem and includes the labels identity_attack, insult, obscene, severe_toxicity, threat and toxicity. Here adapted for toxicity classification, which is the most represented class.",
13
9
  dataset={
14
10
  "path": "TurkuNLP/jigsaw_toxicity_pred_fi",
15
11
  "revision": "6e7340e6be87124f319e25290778760c14df64d3",
@@ -57,12 +53,7 @@ Laippala, Veronika},
57
53
  class FinToxicityClassificationV2(AbsTaskClassification):
58
54
  metadata = TaskMetadata(
59
55
  name="FinToxicityClassification.v2",
60
- description="""
61
- This dataset is a DeepL -based machine translated version of the Jigsaw toxicity dataset for Finnish. The dataset is originally from a Kaggle competition https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data.
62
- The original dataset poses a multi-label text classification problem and includes the labels identity_attack, insult, obscene, severe_toxicity, threat and toxicity.
63
- Here adapted for toxicity classification, which is the most represented class.
64
-
65
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
56
+ description="This dataset is a DeepL -based machine translated version of the Jigsaw toxicity dataset for Finnish. The dataset is originally from a Kaggle competition https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data. The original dataset poses a multi-label text classification problem and includes the labels identity_attack, insult, obscene, severe_toxicity, threat and toxicity. Here adapted for toxicity classification, which is the most represented class. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
66
57
  dataset={
67
58
  "path": "mteb/fin_toxicity",
68
59
  "revision": "1deba6e874be1d5632a4ac0d1fb71f4bc3dea0d6",
@@ -43,8 +43,7 @@ class FrenchBookReviewsV2(AbsTaskClassification):
43
43
  "path": "mteb/french_book_reviews",
44
44
  "revision": "71d755fd76073533c3d0c262f6b542eb0fa7ce96",
45
45
  },
46
- description="""It is a French book reviews dataset containing a huge number of reader reviews on French books. Each review is pared with a rating that ranges from 0.5 to 5 (with 0.5 increment).
47
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
46
+ description="It is a French book reviews dataset containing a huge number of reader reviews on French books. Each review is pared with a rating that ranges from 0.5 to 5 (with 0.5 increment). This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
48
47
  reference="https://huggingface.co/datasets/Abirate/french_book_reviews",
49
48
  type="Classification",
50
49
  category="t2c",
@@ -49,8 +49,7 @@ class MovieReviewSentimentClassificationV2(AbsTaskClassification):
49
49
  "path": "mteb/movie_review_sentiment",
50
50
  "revision": "4e182033cbfe75ae0556cd640d028986be82afd8",
51
51
  },
52
- description="""The Allociné dataset is a French-language dataset for sentiment analysis that contains movie reviews produced by the online community of the Allociné.fr website.
53
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
52
+ description="The Allociné dataset is a French-language dataset for sentiment analysis that contains movie reviews produced by the online community of the Allociné.fr website. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
54
53
  reference="https://github.com/TheophileBlard/french-sentiment-analysis-with-bert",
55
54
  type="Classification",
56
55
  category="t2c",
@@ -35,8 +35,7 @@ class GujaratiNewsClassification(AbsTaskClassification):
35
35
  class GujaratiNewsClassificationV2(AbsTaskClassification):
36
36
  metadata = TaskMetadata(
37
37
  name="GujaratiNewsClassification.v2",
38
- description="""A Gujarati dataset for 3-class classification of Gujarati news articles
39
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
38
+ description="A Gujarati dataset for 3-class classification of Gujarati news articles This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
40
39
  reference="https://github.com/goru001/nlp-for-gujarati",
41
40
  dataset={
42
41
  "path": "mteb/gujarati_news",
@@ -1,6 +1,11 @@
1
1
  from .hebrew_sentiment_analysis import (
2
2
  HebrewSentimentAnalysis,
3
3
  HebrewSentimentAnalysisV2,
4
+ HebrewSentimentAnalysisV3,
4
5
  )
5
6
 
6
- __all__ = ["HebrewSentimentAnalysis", "HebrewSentimentAnalysisV2"]
7
+ __all__ = [
8
+ "HebrewSentimentAnalysis",
9
+ "HebrewSentimentAnalysisV2",
10
+ "HebrewSentimentAnalysisV3",
11
+ ]
@@ -9,7 +9,12 @@ class HebrewSentimentAnalysis(AbsTaskClassification):
9
9
  "path": "mteb/HebrewSentimentAnalysis",
10
10
  "revision": "03eb0996c8234e0d8cd7206bf4763815deda12ed",
11
11
  },
12
- description="HebrewSentiment is a data set consists of 12,804 user comments to posts on the official Facebook page of Israel’s president, Mr. Reuven Rivlin. In October 2015, we used the open software application Netvizz (Rieder, 2013) to scrape all the comments to all of the president’s posts in the period of June – August 2014, the first three months of Rivlin’s presidency.2 While the president’s posts aimed at reconciling tensions and called for tolerance and empathy, the sentiment expressed in the comments to the president’s posts was polarized between citizens who warmly thanked the president, and citizens that fiercely critiqued his policy.",
12
+ description=(
13
+ "HebrewSentiment is a data set consists of 12,804 user comments to posts on the official Facebook page of Israel’s president, Mr. Reuven Rivlin. "
14
+ "In October 2015, we used the open software application Netvizz (Rieder, 2013) to scrape all the comments to all of the president’s posts in the period of June – August 2014, "
15
+ "the first three months of Rivlin’s presidency.2 While the president’s posts aimed at reconciling tensions and called for tolerance and empathy, "
16
+ "the sentiment expressed in the comments to the president’s posts was polarized between citizens who warmly thanked the president, and citizens that fiercely critiqued his policy. "
17
+ ),
13
18
  reference="https://huggingface.co/datasets/hebrew_sentiment",
14
19
  type="Classification",
15
20
  category="t2c",
@@ -37,7 +42,7 @@ class HebrewSentimentAnalysis(AbsTaskClassification):
37
42
  year = {2018},
38
43
  }
39
44
  """,
40
- superseded_by="HebrewSentimentAnalysis.v2",
45
+ superseded_by="HebrewSentimentAnalysis.v3",
41
46
  )
42
47
 
43
48
 
@@ -49,8 +54,61 @@ class HebrewSentimentAnalysisV2(AbsTaskClassification):
49
54
  "revision": "7ecd049fc8ac0d6f0a0121c8ff9fe44ea5bd935b",
50
55
  "name": "morph",
51
56
  },
52
- description="""HebrewSentiment is a data set consists of 12,804 user comments to posts on the official Facebook page of Israel’s president, Mr. Reuven Rivlin. In October 2015, we used the open software application Netvizz (Rieder, 2013) to scrape all the comments to all of the president’s posts in the period of June – August 2014, the first three months of Rivlin’s presidency.2 While the president’s posts aimed at reconciling tensions and called for tolerance and empathy, the sentiment expressed in the comments to the president’s posts was polarized between citizens who warmly thanked the president, and citizens that fiercely critiqued his policy.
53
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
57
+ description=(
58
+ "HebrewSentiment is a data set consists of 12,804 user comments to posts on the official Facebook page of Israel’s president, Mr. Reuven Rivlin. "
59
+ "In October 2015, we used the open software application Netvizz (Rieder, 2013) to scrape all the comments to all of the president’s posts in the period of June – August 2014, "
60
+ "the first three months of Rivlin’s presidency.2 While the president’s posts aimed at reconciling tensions and called for tolerance and empathy, "
61
+ "the sentiment expressed in the comments to the president’s posts was polarized between citizens who warmly thanked the president, and citizens that fiercely critiqued his policy. "
62
+ "This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)"
63
+ ),
64
+ reference="https://huggingface.co/datasets/hebrew_sentiment",
65
+ type="Classification",
66
+ category="t2c",
67
+ modalities=["text"],
68
+ eval_splits=["test"],
69
+ eval_langs=["heb-Hebr"],
70
+ main_score="accuracy",
71
+ date=("2015-10-01", "2015-10-31"),
72
+ domains=["Reviews", "Written"],
73
+ task_subtypes=["Sentiment/Hate speech"],
74
+ license="mit",
75
+ annotations_creators="expert-annotated",
76
+ dialect=[],
77
+ sample_creation="found",
78
+ bibtex_citation=r"""
79
+ @inproceedings{amram-etal-2018-representations,
80
+ address = {Santa Fe, New Mexico, USA},
81
+ author = {Amram, Adam and Ben David, Anat and Tsarfaty, Reut},
82
+ booktitle = {Proceedings of the 27th International Conference on Computational Linguistics},
83
+ month = aug,
84
+ pages = {2242--2252},
85
+ publisher = {Association for Computational Linguistics},
86
+ title = {Representations and Architectures in Neural Sentiment Analysis for Morphologically Rich Languages: A Case Study from {M}odern {H}ebrew},
87
+ url = {https://www.aclweb.org/anthology/C18-1190},
88
+ year = {2018},
89
+ }
90
+ """,
91
+ adapted_from=["HebrewSentimentAnalysis"],
92
+ superseded_by="HebrewSentimentAnalysis.v3",
93
+ )
94
+
95
+
96
+ class HebrewSentimentAnalysisV3(AbsTaskClassification):
97
+ label_column_name = "labels"
98
+ metadata = TaskMetadata(
99
+ name="HebrewSentimentAnalysis.v3",
100
+ dataset={
101
+ "path": "mteb/HebrewSentimentAnalysisV4",
102
+ "revision": "aa0b83c4b16cd28daf7c41ef3402e3ffe9c70c59",
103
+ },
104
+ description=(
105
+ "HebrewSentiment is a data set consists of 12,804 user comments to posts on the official Facebook page of Israel’s president, Mr. Reuven Rivlin. "
106
+ "In October 2015, we used the open software application Netvizz (Rieder, 2013) to scrape all the comments to all of the president’s posts in the period of June – August 2014, "
107
+ "the first three months of Rivlin’s presidency.2 While the president’s posts aimed at reconciling tensions and called for tolerance and empathy, "
108
+ "the sentiment expressed in the comments to the president’s posts was polarized between citizens who warmly thanked the president, and citizens that fiercely critiqued his policy. "
109
+ "This version corrects texts (took pre-tokenized) [more details in this thread](https://huggingface.co/datasets/mteb/HebrewSentimentAnalysis/discussions/2). "
110
+ "This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)"
111
+ ),
54
112
  reference="https://huggingface.co/datasets/hebrew_sentiment",
55
113
  type="Classification",
56
114
  category="t2c",
@@ -59,8 +59,7 @@ class HindiDiscourseClassificationV2(AbsTaskClassification):
59
59
  "path": "mteb/hindi_discourse",
60
60
  "revision": "9d10173a3df9858adc90711d8da9abf3df0a1259",
61
61
  },
62
- description="""A Hindi Discourse dataset in Hindi with values for coherence.
63
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
62
+ description="A Hindi Discourse dataset in Hindi with values for coherence. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
64
63
  reference="https://aclanthology.org/2020.lrec-1.149/",
65
64
  type="Classification",
66
65
  category="t2c",
@@ -46,8 +46,7 @@ class SentimentAnalysisHindi(AbsTaskClassification):
46
46
  class SentimentAnalysisHindiV2(AbsTaskClassification):
47
47
  metadata = TaskMetadata(
48
48
  name="SentimentAnalysisHindi.v2",
49
- description="""Hindi Sentiment Analysis Dataset
50
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
49
+ description="Hindi Sentiment Analysis Dataset This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
51
50
  reference="https://huggingface.co/datasets/OdiaGenAI/sentiment_analysis_hindi",
52
51
  dataset={
53
52
  "path": "mteb/sentiment_analysis_hindi",
@@ -42,8 +42,7 @@ class FrenkHrClassification(AbsTaskClassification):
42
42
  class FrenkHrClassificationV2(AbsTaskClassification):
43
43
  metadata = TaskMetadata(
44
44
  name="FrenkHrClassification.v2",
45
- description="""Croatian subset of the FRENK dataset
46
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
45
+ description="Croatian subset of the FRENK dataset This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
47
46
  dataset={
48
47
  "path": "mteb/frenk_hr",
49
48
  "revision": "09f90d0bee34d5e703caed26737166591a8f12b9",
@@ -57,8 +57,7 @@ class IndonesianIdClickbaitClassificationV2(AbsTaskClassification):
57
57
  "path": "mteb/indonesian_id_clickbait",
58
58
  "revision": "a54158a1b437a85e1982a70d0c57a69c69d0a5b8",
59
59
  },
60
- description="""The CLICK-ID dataset is a collection of Indonesian news headlines that was collected from 12 local online news publishers.
61
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
60
+ description="The CLICK-ID dataset is a collection of Indonesian news headlines that was collected from 12 local online news publishers. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
62
61
  reference="http://www.sciencedirect.com/science/article/pii/S2352340920311252",
63
62
  type="Classification",
64
63
  category="t2c",
@@ -104,8 +104,7 @@ Purwarianti, Ayu},
104
104
  class IndonesianMongabayConservationClassificationV2(AbsTaskClassification):
105
105
  metadata = TaskMetadata(
106
106
  name="IndonesianMongabayConservationClassification.v2",
107
- description="""Conservation dataset that was collected from mongabay.co.id contains topic-classification task (multi-label format) and sentiment classification. This task only covers sentiment analysis (positive, neutral negative)
108
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
107
+ description="Conservation dataset that was collected from mongabay.co.id contains topic-classification task (multi-label format) and sentiment classification. This task only covers sentiment analysis (positive, neutral negative) This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
109
108
  reference="https://aclanthology.org/2023.sealp-1.4/",
110
109
  dataset={
111
110
  "path": "mteb/indonesian_mongabay_conservation",
@@ -52,8 +52,7 @@ class ItalianLinguisticAcceptabilityClassificationV2(AbsTaskClassification):
52
52
  "path": "mteb/italian_linguistic_acceptability",
53
53
  "revision": "4550151a0f0433e65df172c088427063e376ce81",
54
54
  },
55
- description="""An Italian Corpus of Linguistic Acceptability taken from linguistic literature with a binary annotation made by the original authors themselves.
56
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
55
+ description="An Italian Corpus of Linguistic Acceptability taken from linguistic literature with a binary annotation made by the original authors themselves. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
57
56
  reference="https://aclanthology.org/2021.findings-emnlp.250/",
58
57
  type="Classification",
59
58
  category="t2c",
@@ -41,8 +41,7 @@ class JavaneseIMDBClassification(AbsTaskClassification):
41
41
  class JavaneseIMDBClassificationV2(AbsTaskClassification):
42
42
  metadata = TaskMetadata(
43
43
  name="JavaneseIMDBClassification.v2",
44
- description="""Large Movie Review Dataset translated to Javanese. This is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets.
45
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
44
+ description="Large Movie Review Dataset translated to Javanese. This is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
46
45
  reference="https://github.com/w11wo/nlp-datasets#javanese-imdb",
47
46
  dataset={
48
47
  "path": "mteb/javanese_imdb",
@@ -63,8 +63,7 @@ class WRIMEClassificationV2(AbsTaskClassification):
63
63
  "revision": "6687c3bd031a0b144189958bad57db0b95a48dec",
64
64
  "name": "ver2",
65
65
  },
66
- description="""A dataset of Japanese social network rated for sentiment
67
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
66
+ description="A dataset of Japanese social network rated for sentiment This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
68
67
  reference="https://aclanthology.org/2021.naacl-main.169/",
69
68
  type="Classification",
70
69
  category="t2c",
@@ -45,8 +45,7 @@ class KannadaNewsClassification(AbsTaskClassification):
45
45
  class KannadaNewsClassificationV2(AbsTaskClassification):
46
46
  metadata = TaskMetadata(
47
47
  name="KannadaNewsClassification.v2",
48
- description="""The Kannada news dataset contains only the headlines of news article in three categories: Entertainment, Tech, and Sports. The data set contains around 6300 news article headlines which are collected from Kannada news websites. The data set has been cleaned and contains train and test set using which can be used to benchmark topic classification models in Kannada.
49
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
48
+ description="The Kannada news dataset contains only the headlines of news article in three categories: Entertainment, Tech, and Sports. The data set contains around 6300 news article headlines which are collected from Kannada news websites. The data set has been cleaned and contains train and test set using which can be used to benchmark topic classification models in Kannada. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
50
49
  dataset={
51
50
  "path": "mteb/kannada_news",
52
51
  "revision": "bf9c88b5bd4e5b349a39492e5298a928ab509a92",
@@ -62,8 +62,7 @@ class KlueTCV2(AbsTaskClassification):
62
62
  "name": "ynat",
63
63
  "revision": "c0e3d82ac01def9bfd92dffb1e7dde619b50d0a2",
64
64
  },
65
- description="""Topic classification dataset of human-annotated news headlines. Part of the Korean Language Understanding Evaluation (KLUE).
66
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
65
+ description="Topic classification dataset of human-annotated news headlines. Part of the Korean Language Understanding Evaluation (KLUE). This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
67
66
  reference="https://arxiv.org/abs/2105.09680",
68
67
  type="Classification",
69
68
  category="t2c",
@@ -5,14 +5,7 @@ from mteb.abstasks.task_metadata import TaskMetadata
5
5
  class KorHateClassification(AbsTaskClassification):
6
6
  metadata = TaskMetadata(
7
7
  name="KorHateClassification",
8
- description="""The dataset was created to provide the first human-labeled Korean corpus for
9
- toxic speech detection from a Korean online entertainment news aggregator. Recently,
10
- two young Korean celebrities suffered from a series of tragic incidents that led to two
11
- major Korean web portals to close the comments section on their platform. However, this only
12
- serves as a temporary solution, and the fundamental issue has not been solved yet. This dataset
13
- hopes to improve Korean hate speech detection. Annotation was performed by 32 annotators,
14
- consisting of 29 annotators from the crowdsourcing platform DeepNatural AI and three NLP researchers.
15
- """,
8
+ description="The dataset was created to provide the first human-labeled Korean corpus for toxic speech detection from a Korean online entertainment news aggregator. Recently, two young Korean celebrities suffered from a series of tragic incidents that led to two major Korean web portals to close the comments section on their platform. However, this only serves as a temporary solution, and the fundamental issue has not been solved yet. This dataset hopes to improve Korean hate speech detection. Annotation was performed by 32 annotators, consisting of 29 annotators from the crowdsourcing platform DeepNatural AI and three NLP researchers.",
16
9
  dataset={
17
10
  "path": "mteb/KorHateClassification",
18
11
  "revision": "a4e70398c3689a5f55cd1f4a447d8d2da0a7dd1e",
@@ -48,15 +41,7 @@ class KorHateClassification(AbsTaskClassification):
48
41
  class KorHateClassificationV2(AbsTaskClassification):
49
42
  metadata = TaskMetadata(
50
43
  name="KorHateClassification.v2",
51
- description="""The dataset was created to provide the first human-labeled Korean corpus for
52
- toxic speech detection from a Korean online entertainment news aggregator. Recently,
53
- two young Korean celebrities suffered from a series of tragic incidents that led to two
54
- major Korean web portals to close the comments section on their platform. However, this only
55
- serves as a temporary solution, and the fundamental issue has not been solved yet. This dataset
56
- hopes to improve Korean hate speech detection. Annotation was performed by 32 annotators,
57
- consisting of 29 annotators from the crowdsourcing platform DeepNatural AI and three NLP researchers.
58
-
59
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
44
+ description="The dataset was created to provide the first human-labeled Korean corpus for toxic speech detection from a Korean online entertainment news aggregator. Recently, two young Korean celebrities suffered from a series of tragic incidents that led to two major Korean web portals to close the comments section on their platform. However, this only serves as a temporary solution, and the fundamental issue has not been solved yet. This dataset hopes to improve Korean hate speech detection. Annotation was performed by 32 annotators, consisting of 29 annotators from the crowdsourcing platform DeepNatural AI and three NLP researchers. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
60
45
  dataset={
61
46
  "path": "mteb/kor_hate",
62
47
  "revision": "5d64e6dcbe9204c934e9a3852b1130a6f2d51ad4",
@@ -5,15 +5,7 @@ from mteb.abstasks.task_metadata import TaskMetadata
5
5
  class KorSarcasmClassification(AbsTaskClassification):
6
6
  metadata = TaskMetadata(
7
7
  name="KorSarcasmClassification",
8
- description="""
9
- The Korean Sarcasm Dataset was created to detect sarcasm in text, which can significantly alter the original
10
- meaning of a sentence. 9319 tweets were collected from Twitter and labeled for sarcasm or not_sarcasm. These
11
- tweets were gathered by querying for: irony sarcastic, and
12
- sarcasm.
13
- The dataset was created by gathering HTML data from Twitter. Queries for hashtags that include sarcasm
14
- and variants of it were used to return tweets. It was preprocessed by removing the keyword
15
- hashtag, urls and mentions of the user to preserve anonymity.
16
- """,
8
+ description="The Korean Sarcasm Dataset was created to detect sarcasm in text, which can significantly alter the original meaning of a sentence. 9319 tweets were collected from Twitter and labeled for sarcasm or not_sarcasm. These tweets were gathered by querying for: irony sarcastic, and sarcasm. The dataset was created by gathering HTML data from Twitter. Queries for hashtags that include sarcasm and variants of it were used to return tweets. It was preprocessed by removing the keyword hashtag, urls and mentions of the user to preserve anonymity.",
17
9
  dataset={
18
10
  "path": "mteb/KorSarcasmClassification",
19
11
  "revision": "6701f384372c04aa8c64b10582e72eb84135a1d4",
@@ -49,16 +41,7 @@ class KorSarcasmClassification(AbsTaskClassification):
49
41
  class KorSarcasmClassificationV2(AbsTaskClassification):
50
42
  metadata = TaskMetadata(
51
43
  name="KorSarcasmClassification.v2",
52
- description="""
53
- The Korean Sarcasm Dataset was created to detect sarcasm in text, which can significantly alter the original
54
- meaning of a sentence. 9319 tweets were collected from Twitter and labeled for sarcasm or not_sarcasm. These
55
- tweets were gathered by querying for: irony sarcastic, and
56
- sarcasm.
57
- The dataset was created by gathering HTML data from Twitter. Queries for hashtags that include sarcasm
58
- and variants of it were used to return tweets. It was preprocessed by removing the keyword
59
- hashtag, urls and mentions of the user to preserve anonymity.
60
-
61
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
44
+ description="The Korean Sarcasm Dataset was created to detect sarcasm in text, which can significantly alter the original meaning of a sentence. 9319 tweets were collected from Twitter and labeled for sarcasm or not_sarcasm. These tweets were gathered by querying for: irony sarcastic, and sarcasm. The dataset was created by gathering HTML data from Twitter. Queries for hashtags that include sarcasm and variants of it were used to return tweets. It was preprocessed by removing the keyword hashtag, urls and mentions of the user to preserve anonymity. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
62
45
  dataset={
63
46
  "path": "mteb/kor_sarcasm",
64
47
  "revision": "0e5e17b4dba569776e445f5639ba13dc406b2b0e",
@@ -25,7 +25,7 @@ class KurdishSentimentClassification(AbsTaskClassification):
25
25
  dialect=["Sorani"],
26
26
  sample_creation="found",
27
27
  bibtex_citation=r"""
28
- @article{article,
28
+ @article{badawi2024kurdisent,
29
29
  author = {Badawi, Soran and Kazemi, Arefeh and Rezaie, Vali},
30
30
  doi = {10.1007/s10579-023-09716-6},
31
31
  journal = {Language Resources and Evaluation},
@@ -42,8 +42,7 @@ class KurdishSentimentClassification(AbsTaskClassification):
42
42
  class KurdishSentimentClassificationV2(AbsTaskClassification):
43
43
  metadata = TaskMetadata(
44
44
  name="KurdishSentimentClassification.v2",
45
- description="""Kurdish Sentiment Dataset
46
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
45
+ description="Kurdish Sentiment Dataset This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
47
46
  reference="https://link.springer.com/article/10.1007/s10579-023-09716-6",
48
47
  dataset={
49
48
  "path": "mteb/kurdish_sentiment",
@@ -63,7 +62,7 @@ class KurdishSentimentClassificationV2(AbsTaskClassification):
63
62
  dialect=["Sorani"],
64
63
  sample_creation="found",
65
64
  bibtex_citation=r"""
66
- @article{article,
65
+ @article{badawi2024kurdisent,
67
66
  author = {Badawi, Soran and Kazemi, Arefeh and Rezaie, Vali},
68
67
  doi = {10.1007/s10579-023-09716-6},
69
68
  journal = {Language Resources and Evaluation},
@@ -42,8 +42,7 @@ class MalayalamNewsClassification(AbsTaskClassification):
42
42
  class MalayalamNewsClassificationV2(AbsTaskClassification):
43
43
  metadata = TaskMetadata(
44
44
  name="MalayalamNewsClassification.v2",
45
- description="""A Malayalam dataset for 3-class classification of Malayalam news articles
46
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
45
+ description="A Malayalam dataset for 3-class classification of Malayalam news articles This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
47
46
  reference="https://github.com/goru001/nlp-for-malyalam",
48
47
  dataset={
49
48
  "path": "mteb/malayalam_news",
@@ -43,8 +43,7 @@ class MarathiNewsClassification(AbsTaskClassification):
43
43
  class MarathiNewsClassificationV2(AbsTaskClassification):
44
44
  metadata = TaskMetadata(
45
45
  name="MarathiNewsClassification.v2",
46
- description="""A Marathi dataset for 3-class classification of Marathi news articles
47
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
46
+ description="A Marathi dataset for 3-class classification of Marathi news articles This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
48
47
  reference="https://github.com/goru001/nlp-for-marathi",
49
48
  dataset={
50
49
  "path": "mteb/marathi_news",
@@ -49,8 +49,7 @@ Bontcheva, Kalina},
49
49
  class MacedonianTweetSentimentClassificationV2(AbsTaskClassification):
50
50
  metadata = TaskMetadata(
51
51
  name="MacedonianTweetSentimentClassification.v2",
52
- description="""An Macedonian dataset for tweet sentiment classification.
53
- This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)""",
52
+ description="A Macedonian dataset for tweet sentiment classification. This version corrects errors found in the original data. For details, see [pull request](https://github.com/embeddings-benchmark/mteb/pull/2900)",
54
53
  reference="https://aclanthology.org/R15-1034/",
55
54
  dataset={
56
55
  "path": "mteb/macedonian_tweet_sentiment",
@@ -10,12 +10,7 @@ _LANGS = {
10
10
  class CataloniaTweetClassification(AbsTaskClassification):
11
11
  metadata = TaskMetadata(
12
12
  name="CataloniaTweetClassification",
13
- description="""This dataset contains two corpora in Spanish and Catalan that consist of annotated Twitter
14
- messages for automatic stance detection. The data was collected over 12 days during February and March
15
- of 2019 from tweets posted in Barcelona, and during September of 2018 from tweets posted in the town of Terrassa, Catalonia.
16
- Each corpus is annotated with three classes: AGAINST, FAVOR and NEUTRAL, which express the stance
17
- towards the target - independence of Catalonia.
18
- """,
13
+ description="This dataset contains two corpora in Spanish and Catalan that consist of annotated Twitter messages for automatic stance detection. The data was collected over 12 days during February and March of 2019 from tweets posted in Barcelona, and during September of 2018 from tweets posted in the town of Terrassa, Catalonia. Each corpus is annotated with three classes: AGAINST, FAVOR and NEUTRAL, which express the stance towards the target - independence of Catalonia.",
19
14
  reference="https://aclanthology.org/2020.lrec-1.171/",
20
15
  dataset={
21
16
  "path": "community-datasets/catalonia_independence",
@@ -24,10 +24,7 @@ class MultiHateClassification(AbsTaskClassification):
24
24
  "path": "mteb/multi-hatecheck",
25
25
  "revision": "8f95949846bb9e33c6aaf730ccfdb8fe6bcfb7a9",
26
26
  },
27
- description="""Hate speech detection dataset with binary
28
- (hateful vs non-hateful) labels. Includes 25+ distinct types of hate
29
- and challenging non-hate, and 11 languages.
30
- """,
27
+ description="Hate speech detection dataset with binary (hateful vs non-hateful) labels. Includes 25+ distinct types of hate and challenging non-hate, and 11 languages.",
31
28
  reference="https://aclanthology.org/2022.woah-1.15/",
32
29
  type="Classification",
33
30
  category="t2c",
@@ -9,11 +9,7 @@ class RuSciBenchCoreRiscClassification(AbsTaskClassification):
9
9
  "path": "mlsa-iai-msu-lab/ru_sci_bench_mteb",
10
10
  "revision": "fbc0599a0b5f00b3c7d87ab4d13490f04fb77f8e",
11
11
  },
12
- description="""This binary classification task aims to determine whether a scientific paper
13
- (based on its title and abstract) belongs to the Core of the Russian Science Citation Index (RISC).
14
- The RISC includes a wide range of publications, but the Core RISC comprises the most cited and prestigious
15
- journals, dissertations, theses, monographs, and studies. The task is provided for both Russian and English
16
- versions of the paper's title and abstract.""",
12
+ description="This binary classification task aims to determine whether a scientific paper (based on its title and abstract) belongs to the Core of the Russian Science Citation Index (RISC). The RISC includes a wide range of publications, but the Core RISC comprises the most cited and prestigious journals, dissertations, theses, monographs, and studies. The task is provided for both Russian and English versions of the paper's title and abstract.",
17
13
  reference="https://github.com/mlsa-iai-msu-lab/ru_sci_bench_mteb",
18
14
  type="Classification",
19
15
  category="t2c",
@@ -57,10 +53,7 @@ class RuSciBenchPubTypeClassification(AbsTaskClassification):
57
53
  "path": "mlsa-iai-msu-lab/ru_sci_bench_mteb",
58
54
  "revision": "fbc0599a0b5f00b3c7d87ab4d13490f04fb77f8e",
59
55
  },
60
- description="""This task involves classifying scientific papers (based on their title and abstract)
61
- into different publication types. The dataset identifies the following types:
62
- 'Article', 'Conference proceedings', 'Survey', 'Miscellanea', 'Short message', 'Review', and 'Personalia'.
63
- This task is available for both Russian and English versions of the paper's title and abstract.""",
56
+ description="This task involves classifying scientific papers (based on their title and abstract) into different publication types. The dataset identifies the following types: 'Article', 'Conference proceedings', 'Survey', 'Miscellanea', 'Short message', 'Review', and 'Personalia'. This task is available for both Russian and English versions of the paper's title and abstract.",
64
57
  reference="https://github.com/mlsa-iai-msu-lab/ru_sci_bench_mteb",
65
58
  type="Classification",
66
59
  category="t2c",
@@ -104,13 +97,7 @@ class RuSciBenchGRNTIClassificationV2(AbsTaskClassification):
104
97
  "path": "mlsa-iai-msu-lab/ru_sci_bench_mteb",
105
98
  "revision": "fbc0599a0b5f00b3c7d87ab4d13490f04fb77f8e",
106
99
  },
107
- description="""Classification of scientific papers based on the GRNTI (State Rubricator of Scientific and
108
- Technical Information) rubricator. GRNTI is a universal hierarchical classification of knowledge domains
109
- adopted in Russia and CIS countries to systematize the entire flow of scientific and technical information.
110
- This task uses the first level of the GRNTI hierarchy and top 28 classes by frequency.
111
-
112
- In this version, English language support has been added and data partitioning has been slightly modified.
113
- """,
100
+ description="Classification of scientific papers based on the GRNTI (State Rubricator of Scientific and Technical Information) rubricator. GRNTI is a universal hierarchical classification of knowledge domains adopted in Russia and CIS countries to systematize the entire flow of scientific and technical information. This task uses the first level of the GRNTI hierarchy and top 28 classes by frequency. In this version, English language support has been added and data partitioning has been slightly modified.",
114
101
  reference="https://github.com/mlsa-iai-msu-lab/ru_sci_bench_mteb",
115
102
  type="Classification",
116
103
  category="t2c",
@@ -154,13 +141,7 @@ class RuSciBenchOECDClassificationV2(AbsTaskClassification):
154
141
  "path": "mlsa-iai-msu-lab/ru_sci_bench_mteb",
155
142
  "revision": "fbc0599a0b5f00b3c7d87ab4d13490f04fb77f8e",
156
143
  },
157
- description="""Classification of scientific papers based on the OECD
158
- (Organization for Economic Co-operation and Development) rubricator. OECD provides
159
- a hierarchical 3-level system of classes for labeling scientific articles.
160
- This task uses the first two levels of the OECD hierarchy, top 29 classes.
161
-
162
- In this version, English language support has been added and data partitioning has been slightly modified.
163
- """,
144
+ description="Classification of scientific papers based on the OECD (Organization for Economic Co-operation and Development) rubricator. OECD provides a hierarchical 3-level system of classes for labeling scientific articles. This task uses the first two levels of the OECD hierarchy, top 29 classes. In this version, English language support has been added and data partitioning has been slightly modified.",
164
145
  reference="https://github.com/mlsa-iai-msu-lab/ru_sci_bench_mteb",
165
146
  type="Classification",
166
147
  category="t2c",