mteb 2.0.5__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mteb/__init__.py +10 -1
- mteb/_create_dataloaders.py +2 -0
- mteb/abstasks/_stratification.py +1 -1
- mteb/abstasks/abstask.py +6 -1
- mteb/abstasks/dataset_card_template.md +1 -1
- mteb/abstasks/retrieval.py +2 -1
- mteb/abstasks/retrieval_dataset_loaders.py +1 -1
- mteb/abstasks/task_metadata.py +1 -1
- mteb/benchmarks/benchmarks/__init__.py +2 -0
- mteb/benchmarks/benchmarks/benchmarks.py +82 -11
- mteb/benchmarks/get_benchmark.py +1 -1
- mteb/descriptive_stats/Classification/DutchColaClassification.json +54 -0
- mteb/descriptive_stats/Classification/DutchGovernmentBiasClassification.json +54 -0
- mteb/descriptive_stats/Classification/DutchNewsArticlesClassification.json +90 -0
- mteb/descriptive_stats/Classification/DutchSarcasticHeadlinesClassification.json +54 -0
- mteb/descriptive_stats/Classification/IconclassClassification.json +96 -0
- mteb/descriptive_stats/Classification/OpenTenderClassification.json +222 -0
- mteb/descriptive_stats/Classification/VaccinChatNLClassification.json +1068 -0
- mteb/descriptive_stats/Clustering/DutchNewsArticlesClusteringP2P.json +45 -0
- mteb/descriptive_stats/Clustering/DutchNewsArticlesClusteringS2S.json +45 -0
- mteb/descriptive_stats/Clustering/IconclassClusteringS2S.json +48 -0
- mteb/descriptive_stats/Clustering/OpenTenderClusteringP2P.json +111 -0
- mteb/descriptive_stats/Clustering/OpenTenderClusteringS2S.json +111 -0
- mteb/descriptive_stats/Clustering/VABBClusteringP2P.json +60 -0
- mteb/descriptive_stats/Clustering/VABBClusteringS2S.json +60 -0
- mteb/descriptive_stats/Image/Any2AnyMultilingualRetrieval/XFlickr30kCoT2IRetrieval.json +243 -153
- mteb/descriptive_stats/Image/Any2AnyMultilingualRetrieval/XM3600T2IRetrieval.json +999 -629
- mteb/descriptive_stats/Image/Any2AnyRetrieval/OVENIT2TRetrieval.json +33 -17
- mteb/descriptive_stats/Image/DocumentUnderstanding/MIRACLVisionRetrieval.json +574 -0
- mteb/descriptive_stats/MultilabelClassification/CovidDisinformationNLMultiLabelClassification.json +84 -0
- mteb/descriptive_stats/MultilabelClassification/VABBMultiLabelClassification.json +156 -0
- mteb/descriptive_stats/PairClassification/SICKNLPairClassification.json +35 -0
- mteb/descriptive_stats/PairClassification/XLWICNLPairClassification.json +35 -0
- mteb/descriptive_stats/Retrieval/ClimateFEVERHardNegatives.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/DBPediaHardNegatives.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/DutchNewsArticlesRetrieval.json +30 -0
- mteb/descriptive_stats/Retrieval/FEVERHardNegatives.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/HotpotQAHardNegatives.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/LegalQANLRetrieval.json +30 -0
- mteb/descriptive_stats/Retrieval/OpenTenderRetrieval.json +30 -0
- mteb/descriptive_stats/Retrieval/QuoraRetrievalHardNegatives.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/RiaNewsRetrievalHardNegatives.v2.json +30 -0
- mteb/descriptive_stats/Retrieval/VABBRetrieval.json +30 -0
- mteb/descriptive_stats/Retrieval/VDRMultilingualRetrieval.json +184 -0
- mteb/descriptive_stats/Retrieval/bBSARDNLRetrieval.json +30 -0
- mteb/descriptive_stats/STS/SICK-NL-STS.json +28 -0
- mteb/languages/check_language_code.py +11 -3
- mteb/languages/language_scripts.py +4 -0
- mteb/leaderboard/text_segments.py +1 -1
- mteb/models/model_implementations/b1ade_models.py +1 -1
- mteb/models/model_implementations/bge_models.py +1 -3
- mteb/models/model_implementations/bmretriever_models.py +1 -1
- mteb/models/model_implementations/gme_v_models.py +2 -2
- mteb/models/model_implementations/ibm_granite_models.py +1 -1
- mteb/models/model_implementations/inf_models.py +3 -3
- mteb/models/model_implementations/jina_models.py +12 -2
- mteb/models/model_implementations/llm2vec_models.py +1 -1
- mteb/models/model_implementations/misc_models.py +2 -2
- mteb/models/model_implementations/mxbai_models.py +1 -1
- mteb/models/model_implementations/salesforce_models.py +1 -1
- mteb/models/model_implementations/seed_1_6_embedding_models.py +1 -1
- mteb/models/model_implementations/voyage_v.py +9 -9
- mteb/results/task_result.py +6 -8
- mteb/tasks/classification/dan/angry_tweets_classification.py +2 -2
- mteb/tasks/classification/eng/legal_bench_classification.py +3 -3
- mteb/tasks/classification/mya/myanmar_news.py +2 -2
- mteb/tasks/classification/nld/__init__.py +16 -0
- mteb/tasks/classification/nld/dutch_cola_classification.py +38 -0
- mteb/tasks/classification/nld/dutch_government_bias_classification.py +37 -0
- mteb/tasks/classification/nld/dutch_news_articles_classification.py +30 -0
- mteb/tasks/classification/nld/dutch_sarcastic_headlines_classification.py +36 -0
- mteb/tasks/classification/nld/iconclass_classification.py +41 -0
- mteb/tasks/classification/nld/open_tender_classification.py +38 -0
- mteb/tasks/classification/nld/vaccin_chat_nl_classification.py +46 -0
- mteb/tasks/classification/tha/wongnai_reviews_classification.py +1 -1
- mteb/tasks/classification/ukr/ukr_formality_classification.py +2 -2
- mteb/tasks/clustering/__init__.py +1 -0
- mteb/tasks/clustering/nld/__init__.py +17 -0
- mteb/tasks/clustering/nld/dutch_news_articles_clustering_p2p.py +37 -0
- mteb/tasks/clustering/nld/dutch_news_articles_clustering_s2s.py +37 -0
- mteb/tasks/clustering/nld/iconclass_clustering_s2s.py +47 -0
- mteb/tasks/clustering/nld/open_tender_clustering_p2p.py +51 -0
- mteb/tasks/clustering/nld/open_tender_clustering_s2s.py +41 -0
- mteb/tasks/clustering/nld/vabb_clustering_p2p.py +51 -0
- mteb/tasks/clustering/nld/vabb_clustering_s2s.py +51 -0
- mteb/tasks/multilabel_classification/__init__.py +1 -0
- mteb/tasks/multilabel_classification/nld/__init__.py +9 -0
- mteb/tasks/multilabel_classification/nld/covid_disinformation_nl_multi_label_classification.py +88 -0
- mteb/tasks/multilabel_classification/nld/vabb_multi_label_classification.py +44 -0
- mteb/tasks/pair_classification/__init__.py +1 -0
- mteb/tasks/pair_classification/multilingual/indic_xnli_pair_classification.py +9 -8
- mteb/tasks/pair_classification/nld/__init__.py +7 -0
- mteb/tasks/pair_classification/nld/sick_nl_pair_classification.py +36 -0
- mteb/tasks/pair_classification/nld/xlwic_nl_pair_classification.py +41 -0
- mteb/tasks/retrieval/code/code_rag.py +8 -8
- mteb/tasks/retrieval/dan/dan_fever_retrieval.py +1 -1
- mteb/tasks/retrieval/dan/tv2_nordretrieval.py +2 -2
- mteb/tasks/retrieval/dan/twitter_hjerne_retrieval.py +2 -2
- mteb/tasks/retrieval/eng/__init__.py +18 -4
- mteb/tasks/retrieval/eng/climate_fever_retrieval.py +68 -77
- mteb/tasks/retrieval/eng/dbpedia_retrieval.py +55 -50
- mteb/tasks/retrieval/eng/fever_retrieval.py +62 -67
- mteb/tasks/retrieval/eng/hateful_memes_i2t_retrieval.py +0 -4
- mteb/tasks/retrieval/eng/hateful_memes_t2i_retrieval.py +0 -4
- mteb/tasks/retrieval/eng/hotpot_qa_retrieval.py +57 -67
- mteb/tasks/retrieval/eng/legal_summarization_retrieval.py +1 -1
- mteb/tasks/retrieval/eng/memotion_i2t_retrieval.py +0 -3
- mteb/tasks/retrieval/eng/memotion_t2i_retrieval.py +0 -2
- mteb/tasks/retrieval/eng/oven_it2t_retrieval.py +1 -1
- mteb/tasks/retrieval/eng/quora_retrieval.py +51 -46
- mteb/tasks/retrieval/eng/sci_mmir_i2t_retrieval.py +0 -4
- mteb/tasks/retrieval/eng/sci_mmir_t2i_retrieval.py +0 -4
- mteb/tasks/retrieval/eng/vidore_bench_retrieval.py +0 -2
- mteb/tasks/retrieval/jpn/ja_gov_faqs_retrieval.py +1 -1
- mteb/tasks/retrieval/multilingual/belebele_retrieval.py +1 -1
- mteb/tasks/retrieval/multilingual/jina_vdr_bench_retrieval.py +0 -2
- mteb/tasks/retrieval/multilingual/miracl_retrieval.py +1 -1
- mteb/tasks/retrieval/multilingual/miracl_vision_retrieval.py +2 -9
- mteb/tasks/retrieval/multilingual/vidore2_bench_retrieval.py +0 -2
- mteb/tasks/retrieval/multilingual/wit_t2i_retrieval.py +0 -2
- mteb/tasks/retrieval/multilingual/x_flickr30k_co_t2i_retrieval.py +6 -5
- mteb/tasks/retrieval/multilingual/xm3600_t2i_retrieval.py +3 -4
- mteb/tasks/retrieval/nld/__init__.py +10 -0
- mteb/tasks/retrieval/nld/bbsard_nl_retrieval.py +41 -0
- mteb/tasks/retrieval/nld/dutch_news_articles_retrieval.py +30 -0
- mteb/tasks/retrieval/nld/legal_qa_nl_retrieval.py +39 -0
- mteb/tasks/retrieval/nld/open_tender_retrieval.py +38 -0
- mteb/tasks/retrieval/nld/vabb_retrieval.py +41 -0
- mteb/tasks/retrieval/nob/norquad.py +2 -2
- mteb/tasks/retrieval/nob/snl_retrieval.py +2 -2
- mteb/tasks/retrieval/rus/__init__.py +11 -2
- mteb/tasks/retrieval/rus/ria_news_retrieval.py +48 -44
- mteb/tasks/retrieval/tur/tur_hist_quad.py +2 -2
- mteb/tasks/sts/__init__.py +1 -0
- mteb/tasks/sts/nld/__init__.py +5 -0
- mteb/tasks/sts/nld/sick_nl_sts.py +41 -0
- mteb-2.1.1.dist-info/METADATA +253 -0
- {mteb-2.0.5.dist-info → mteb-2.1.1.dist-info}/RECORD +142 -95
- mteb/descriptive_stats/Classification/PersianTextTone.json +0 -56
- mteb/descriptive_stats/Image/Any2TextMutipleChoice/CVBenchCount.json +0 -37
- mteb/descriptive_stats/Image/Any2TextMutipleChoice/CVBenchDepth.json +0 -25
- mteb/descriptive_stats/Image/Any2TextMutipleChoice/CVBenchDistance.json +0 -25
- mteb/descriptive_stats/Image/Any2TextMutipleChoice/CVBenchRelation.json +0 -25
- mteb/descriptive_stats/Image/VisualSTS/STS12VisualSTS.json +0 -20
- mteb/descriptive_stats/Image/VisualSTS/STS13VisualSTS.json +0 -20
- mteb/descriptive_stats/Image/VisualSTS/STS14VisualSTS.json +0 -20
- mteb/descriptive_stats/Image/VisualSTS/STS15VisualSTS.json +0 -20
- mteb/descriptive_stats/Image/VisualSTS/STS16VisualSTS.json +0 -20
- mteb/descriptive_stats/Image/VisualSTS/STS17MultilingualVisualSTS.json +0 -220
- mteb/descriptive_stats/Image/VisualSTS/STSBenchmarkMultilingualVisualSTS.json +0 -402
- mteb/descriptive_stats/Reranking/InstructIR.json +0 -31
- mteb-2.0.5.dist-info/METADATA +0 -455
- {mteb-2.0.5.dist-info → mteb-2.1.1.dist-info}/WHEEL +0 -0
- {mteb-2.0.5.dist-info → mteb-2.1.1.dist-info}/entry_points.txt +0 -0
- {mteb-2.0.5.dist-info → mteb-2.1.1.dist-info}/licenses/LICENSE +0 -0
- {mteb-2.0.5.dist-info → mteb-2.1.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from mteb.abstasks.retrieval import AbsTaskRetrieval
|
|
2
|
+
from mteb.abstasks.task_metadata import TaskMetadata
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class VABBRetrieval(AbsTaskRetrieval):
|
|
6
|
+
metadata = TaskMetadata(
|
|
7
|
+
name="VABBRetrieval",
|
|
8
|
+
description="This dataset contains the fourteenth edition of the Flemish Academic Bibliography for the Social "
|
|
9
|
+
"Sciences and Humanities (VABB-SHW), a database of academic publications from the social sciences "
|
|
10
|
+
"and humanities authored by researchers affiliated to Flemish universities (more information). "
|
|
11
|
+
"Publications in the database are used as one of the parameters of the Flemish performance-based "
|
|
12
|
+
"research funding system",
|
|
13
|
+
reference="https://zenodo.org/records/14214806",
|
|
14
|
+
dataset={
|
|
15
|
+
"path": "clips/mteb-nl-vabb-ret",
|
|
16
|
+
"revision": "af4a1e5b3ed451103894f86ff6b3ce85085d7b48",
|
|
17
|
+
},
|
|
18
|
+
type="Retrieval",
|
|
19
|
+
category="t2t",
|
|
20
|
+
modalities=["text"],
|
|
21
|
+
eval_splits=["test"],
|
|
22
|
+
eval_langs=["nld-Latn"],
|
|
23
|
+
main_score="ndcg_at_10",
|
|
24
|
+
date=("2009-11-01", "2010-01-01"),
|
|
25
|
+
domains=["Academic", "Written"],
|
|
26
|
+
task_subtypes=["Article retrieval"],
|
|
27
|
+
license="cc-by-nc-sa-4.0",
|
|
28
|
+
annotations_creators="derived",
|
|
29
|
+
dialect=[],
|
|
30
|
+
sample_creation="found",
|
|
31
|
+
bibtex_citation=r"""
|
|
32
|
+
@dataset{aspeslagh2024vabb,
|
|
33
|
+
author = {Aspeslagh, Pieter and Guns, Raf and Engels, Tim C. E.},
|
|
34
|
+
doi = {10.5281/zenodo.14214806},
|
|
35
|
+
publisher = {Zenodo},
|
|
36
|
+
title = {VABB-SHW: Dataset of Flemish Academic Bibliography for the Social Sciences and Humanities (edition 14)},
|
|
37
|
+
url = {https://doi.org/10.5281/zenodo.14214806},
|
|
38
|
+
year = {2024},
|
|
39
|
+
}
|
|
40
|
+
""",
|
|
41
|
+
)
|
|
@@ -59,9 +59,9 @@ Fishel, Mark},
|
|
|
59
59
|
self.data_loaded = True
|
|
60
60
|
|
|
61
61
|
def dataset_transform(self) -> None:
|
|
62
|
-
"""And transform to a retrieval
|
|
62
|
+
"""And transform to a retrieval dataset, which have the following attributes
|
|
63
63
|
|
|
64
|
-
self.corpus = dict[doc_id, dict[str, str]] #id => dict with document
|
|
64
|
+
self.corpus = dict[doc_id, dict[str, str]] #id => dict with document data like title and text
|
|
65
65
|
self.queries = dict[query_id, str] #id => query
|
|
66
66
|
self.relevant_docs = dict[query_id, dict[[doc_id, score]]
|
|
67
67
|
"""
|
|
@@ -46,9 +46,9 @@ class SNLRetrieval(AbsTaskRetrieval):
|
|
|
46
46
|
self.data_loaded = True
|
|
47
47
|
|
|
48
48
|
def dataset_transform(self) -> None:
|
|
49
|
-
"""And transform to a retrieval
|
|
49
|
+
"""And transform to a retrieval dataset, which have the following attributes
|
|
50
50
|
|
|
51
|
-
self.corpus = dict[doc_id, dict[str, str]] #id => dict with document
|
|
51
|
+
self.corpus = dict[doc_id, dict[str, str]] #id => dict with document data like title and text
|
|
52
52
|
self.queries = dict[query_id, str] #id => query
|
|
53
53
|
self.relevant_docs = dict[query_id, dict[[doc_id, score]]
|
|
54
54
|
"""
|
|
@@ -1,4 +1,13 @@
|
|
|
1
|
-
from .ria_news_retrieval import
|
|
1
|
+
from .ria_news_retrieval import (
|
|
2
|
+
RiaNewsRetrieval,
|
|
3
|
+
RiaNewsRetrievalHardNegatives,
|
|
4
|
+
RiaNewsRetrievalHardNegativesV2,
|
|
5
|
+
)
|
|
2
6
|
from .ru_bq_retrieval import RuBQRetrieval
|
|
3
7
|
|
|
4
|
-
__all__ = [
|
|
8
|
+
__all__ = [
|
|
9
|
+
"RiaNewsRetrieval",
|
|
10
|
+
"RiaNewsRetrievalHardNegatives",
|
|
11
|
+
"RiaNewsRetrievalHardNegativesV2",
|
|
12
|
+
"RuBQRetrieval",
|
|
13
|
+
]
|
|
@@ -1,6 +1,31 @@
|
|
|
1
1
|
from mteb.abstasks.retrieval import AbsTaskRetrieval
|
|
2
2
|
from mteb.abstasks.task_metadata import TaskMetadata
|
|
3
3
|
|
|
4
|
+
_ria_news_metadata = dict(
|
|
5
|
+
reference="https://arxiv.org/abs/1901.07786",
|
|
6
|
+
type="Retrieval",
|
|
7
|
+
category="t2t",
|
|
8
|
+
modalities=["text"],
|
|
9
|
+
eval_splits=["test"],
|
|
10
|
+
eval_langs=["rus-Cyrl"],
|
|
11
|
+
main_score="ndcg_at_10",
|
|
12
|
+
date=("2010-01-01", "2014-12-31"),
|
|
13
|
+
domains=["News", "Written"],
|
|
14
|
+
task_subtypes=["Article retrieval"],
|
|
15
|
+
license="cc-by-nc-nd-4.0",
|
|
16
|
+
annotations_creators="derived",
|
|
17
|
+
dialect=[],
|
|
18
|
+
sample_creation="found",
|
|
19
|
+
bibtex_citation=r"""
|
|
20
|
+
@inproceedings{gavrilov2018self,
|
|
21
|
+
author = {Gavrilov, Daniil and Kalaidin, Pavel and Malykh, Valentin},
|
|
22
|
+
booktitle = {Proceedings of the 41st European Conference on Information Retrieval},
|
|
23
|
+
title = {Self-Attentive Model for Headline Generation},
|
|
24
|
+
year = {2019},
|
|
25
|
+
}
|
|
26
|
+
""",
|
|
27
|
+
)
|
|
28
|
+
|
|
4
29
|
|
|
5
30
|
class RiaNewsRetrieval(AbsTaskRetrieval):
|
|
6
31
|
ignore_identical_ids = True
|
|
@@ -12,29 +37,8 @@ class RiaNewsRetrieval(AbsTaskRetrieval):
|
|
|
12
37
|
"revision": "82374b0bbacda6114f39ff9c5b925fa1512ca5d7",
|
|
13
38
|
},
|
|
14
39
|
description="News article retrieval by headline. Based on Rossiya Segodnya dataset.",
|
|
15
|
-
reference="https://arxiv.org/abs/1901.07786",
|
|
16
|
-
type="Retrieval",
|
|
17
|
-
category="t2t",
|
|
18
|
-
modalities=["text"],
|
|
19
|
-
eval_splits=["test"],
|
|
20
|
-
eval_langs=["rus-Cyrl"],
|
|
21
|
-
main_score="ndcg_at_10",
|
|
22
|
-
date=("2010-01-01", "2014-12-31"),
|
|
23
|
-
domains=["News", "Written"],
|
|
24
|
-
task_subtypes=["Article retrieval"],
|
|
25
|
-
license="cc-by-nc-nd-4.0",
|
|
26
|
-
annotations_creators="derived",
|
|
27
|
-
dialect=[],
|
|
28
|
-
sample_creation="found",
|
|
29
|
-
bibtex_citation=r"""
|
|
30
|
-
@inproceedings{gavrilov2018self,
|
|
31
|
-
author = {Gavrilov, Daniil and Kalaidin, Pavel and Malykh, Valentin},
|
|
32
|
-
booktitle = {Proceedings of the 41st European Conference on Information Retrieval},
|
|
33
|
-
title = {Self-Attentive Model for Headline Generation},
|
|
34
|
-
year = {2019},
|
|
35
|
-
}
|
|
36
|
-
""",
|
|
37
40
|
prompt={"query": "Given a news title, retrieve relevant news article"},
|
|
41
|
+
**_ria_news_metadata,
|
|
38
42
|
)
|
|
39
43
|
|
|
40
44
|
|
|
@@ -48,27 +52,27 @@ class RiaNewsRetrievalHardNegatives(AbsTaskRetrieval):
|
|
|
48
52
|
"revision": "d42860a6c15f0a2c4485bda10c6e5b641fdfe479",
|
|
49
53
|
},
|
|
50
54
|
description="News article retrieval by headline. Based on Rossiya Segodnya dataset. The hard negative version has been created by pooling the 250 top documents per query from BM25, e5-multilingual-large and e5-mistral-instruct.",
|
|
51
|
-
reference="https://arxiv.org/abs/1901.07786",
|
|
52
|
-
type="Retrieval",
|
|
53
|
-
category="t2t",
|
|
54
|
-
modalities=["text"],
|
|
55
|
-
eval_splits=["test"],
|
|
56
|
-
eval_langs=["rus-Cyrl"],
|
|
57
|
-
main_score="ndcg_at_10",
|
|
58
|
-
date=("2010-01-01", "2014-12-31"),
|
|
59
|
-
domains=["News", "Written"],
|
|
60
|
-
task_subtypes=["Article retrieval"],
|
|
61
|
-
license="cc-by-nc-nd-4.0",
|
|
62
|
-
annotations_creators="derived",
|
|
63
|
-
dialect=[],
|
|
64
|
-
sample_creation="found",
|
|
65
|
-
bibtex_citation=r"""
|
|
66
|
-
@inproceedings{gavrilov2018self,
|
|
67
|
-
author = {Gavrilov, Daniil and Kalaidin, Pavel and Malykh, Valentin},
|
|
68
|
-
booktitle = {Proceedings of the 41st European Conference on Information Retrieval},
|
|
69
|
-
title = {Self-Attentive Model for Headline Generation},
|
|
70
|
-
year = {2019},
|
|
71
|
-
}
|
|
72
|
-
""",
|
|
73
55
|
adapted_from=["RiaNewsRetrieval"],
|
|
56
|
+
superseded_by="RiaNewsRetrievalHardNegatives.v2",
|
|
57
|
+
**_ria_news_metadata,
|
|
58
|
+
)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class RiaNewsRetrievalHardNegativesV2(AbsTaskRetrieval):
|
|
62
|
+
ignore_identical_ids = True
|
|
63
|
+
|
|
64
|
+
metadata = TaskMetadata(
|
|
65
|
+
name="RiaNewsRetrievalHardNegatives.v2",
|
|
66
|
+
dataset={
|
|
67
|
+
"path": "mteb/RiaNewsRetrieval_test_top_250_only_w_correct-v2",
|
|
68
|
+
"revision": "d42860a6c15f0a2c4485bda10c6e5b641fdfe479",
|
|
69
|
+
},
|
|
70
|
+
description=(
|
|
71
|
+
"News article retrieval by headline. Based on Rossiya Segodnya dataset. "
|
|
72
|
+
"The hard negative version has been created by pooling the 250 top documents per query from BM25, e5-multilingual-large and e5-mistral-instruct."
|
|
73
|
+
"V2 uses a more appropriate prompt rather than the default prompt for retrieval. You can get more information on the effect of different prompt in the [PR](https://github.com/embeddings-benchmark/mteb/pull/3469#issuecomment-3436467106)"
|
|
74
|
+
),
|
|
75
|
+
adapted_from=["RiaNewsRetrieval"],
|
|
76
|
+
prompt={"query": "Given a news title, retrieve relevant news article"},
|
|
77
|
+
**_ria_news_metadata,
|
|
74
78
|
)
|
|
@@ -42,9 +42,9 @@ class TurHistQuadRetrieval(AbsTaskRetrieval):
|
|
|
42
42
|
)
|
|
43
43
|
|
|
44
44
|
def load_data(self, **kwargs) -> None:
|
|
45
|
-
"""And transform to a retrieval
|
|
45
|
+
"""And transform to a retrieval dataset, which have the following attributes
|
|
46
46
|
|
|
47
|
-
self.corpus = dict[doc_id, dict[str, str]] #id => dict with document
|
|
47
|
+
self.corpus = dict[doc_id, dict[str, str]] #id => dict with document data like title and text
|
|
48
48
|
self.queries = dict[query_id, str] #id => query
|
|
49
49
|
self.relevant_docs = dict[query_id, dict[[doc_id, score]]
|
|
50
50
|
"""
|
mteb/tasks/sts/__init__.py
CHANGED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from mteb.abstasks import AbsTaskSTS
|
|
2
|
+
from mteb.abstasks.task_metadata import TaskMetadata
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class SICKNLSTS(AbsTaskSTS):
|
|
6
|
+
fast_loading = True
|
|
7
|
+
metadata = TaskMetadata(
|
|
8
|
+
name="SICK-NL-STS",
|
|
9
|
+
dataset={
|
|
10
|
+
"path": "clips/mteb-nl-sick-sts-pr",
|
|
11
|
+
"revision": "7f88f003fc4e37ed8cd9ade84e390d871b032fef",
|
|
12
|
+
},
|
|
13
|
+
description="SICK-NL (read: signal), a dataset targeting Natural Language Inference in Dutch. SICK-NL is "
|
|
14
|
+
"obtained by translating the SICK dataset of (Marelli et al., 2014) from English into Dutch.",
|
|
15
|
+
reference="https://aclanthology.org/2021.eacl-main.126/",
|
|
16
|
+
type="STS",
|
|
17
|
+
category="t2t",
|
|
18
|
+
modalities=["text"],
|
|
19
|
+
eval_splits=["test"],
|
|
20
|
+
eval_langs=["nld-Latn"],
|
|
21
|
+
main_score="cosine_spearman",
|
|
22
|
+
date=("2012-01-01", "2017-12-31"),
|
|
23
|
+
domains=["News", "Social", "Web", "Spoken", "Written"],
|
|
24
|
+
task_subtypes=[],
|
|
25
|
+
license="mit",
|
|
26
|
+
annotations_creators="human-annotated",
|
|
27
|
+
dialect=[],
|
|
28
|
+
sample_creation="machine-translated",
|
|
29
|
+
bibtex_citation=r"""
|
|
30
|
+
@inproceedings{wijnholds2021sick,
|
|
31
|
+
author = {Wijnholds, Gijs and Moortgat, Michael},
|
|
32
|
+
booktitle = {Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume},
|
|
33
|
+
pages = {1474--1479},
|
|
34
|
+
title = {SICK-NL: A Dataset for Dutch Natural Language Inference},
|
|
35
|
+
year = {2021},
|
|
36
|
+
}
|
|
37
|
+
""",
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
min_score = 0
|
|
41
|
+
max_score = 5
|
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: mteb
|
|
3
|
+
Version: 2.1.1
|
|
4
|
+
Summary: Massive Text Embedding Benchmark
|
|
5
|
+
Author-email: MTEB Contributors <niklas@huggingface.co>, Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Nouamane Tazi <nouamane@huggingface.co>, Nils Reimers <info@nils-reimers.de>
|
|
6
|
+
Maintainer-email: Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Roman Solomatin <risolomatin@gmail.com>, Isaac Chung <chungisaac1217@gmail.com>
|
|
7
|
+
License-Expression: Apache-2.0
|
|
8
|
+
Project-URL: Homepage, https://github.com/embeddings-benchmark/mteb
|
|
9
|
+
Project-URL: Documentation, https://embeddings-benchmark.github.io/mteb/
|
|
10
|
+
Project-URL: Repository, https://github.com/embeddings-benchmark/mteb
|
|
11
|
+
Project-URL: Hugging Face Organization, https://huggingface.co/mteb
|
|
12
|
+
Keywords: deep learning,text embeddings,embeddings,multimodal,benchmark,retrieval,information retrieval
|
|
13
|
+
Classifier: Development Status :: 5 - Production/Stable
|
|
14
|
+
Classifier: Environment :: Console
|
|
15
|
+
Classifier: Intended Audience :: Developers
|
|
16
|
+
Classifier: Intended Audience :: Information Technology
|
|
17
|
+
Classifier: Operating System :: OS Independent
|
|
18
|
+
Classifier: Programming Language :: Python
|
|
19
|
+
Requires-Python: <3.14,>=3.10
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
License-File: LICENSE
|
|
22
|
+
Requires-Dist: datasets>=2.19.0
|
|
23
|
+
Requires-Dist: numpy<3.0.0,>=1.0.0
|
|
24
|
+
Requires-Dist: requests>=2.26.0
|
|
25
|
+
Requires-Dist: scikit-learn>=1.4.0
|
|
26
|
+
Requires-Dist: scipy>=0.0.0
|
|
27
|
+
Requires-Dist: sentence_transformers>=3.0.0
|
|
28
|
+
Requires-Dist: typing-extensions>=4.5.0
|
|
29
|
+
Requires-Dist: torch>1.0.0
|
|
30
|
+
Requires-Dist: tqdm>1.0.0
|
|
31
|
+
Requires-Dist: rich>=0.0.0
|
|
32
|
+
Requires-Dist: pytrec-eval-terrier>=0.5.6
|
|
33
|
+
Requires-Dist: pydantic>=2.0.0
|
|
34
|
+
Requires-Dist: polars>=0.20.22
|
|
35
|
+
Provides-Extra: image
|
|
36
|
+
Requires-Dist: torchvision>0.2.1; extra == "image"
|
|
37
|
+
Provides-Extra: codecarbon
|
|
38
|
+
Requires-Dist: codecarbon<3.0.0,>=2.0.0; extra == "codecarbon"
|
|
39
|
+
Provides-Extra: leaderboard
|
|
40
|
+
Requires-Dist: gradio==5.35.0; extra == "leaderboard"
|
|
41
|
+
Requires-Dist: plotly<6.0.0,>=5.24.0; extra == "leaderboard"
|
|
42
|
+
Requires-Dist: cachetools>=5.2.0; extra == "leaderboard"
|
|
43
|
+
Requires-Dist: matplotlib>=3.9.4; extra == "leaderboard"
|
|
44
|
+
Provides-Extra: peft
|
|
45
|
+
Requires-Dist: peft>=0.11.0; extra == "peft"
|
|
46
|
+
Provides-Extra: flagembedding
|
|
47
|
+
Requires-Dist: FlagEmbedding==1.3.4; extra == "flagembedding"
|
|
48
|
+
Provides-Extra: jina
|
|
49
|
+
Requires-Dist: einops>=0.8.0; extra == "jina"
|
|
50
|
+
Provides-Extra: jina-v4
|
|
51
|
+
Requires-Dist: peft>=0.15.2; extra == "jina-v4"
|
|
52
|
+
Requires-Dist: transformers>=4.52.0; extra == "jina-v4"
|
|
53
|
+
Requires-Dist: torchvision>=0.22.1; extra == "jina-v4"
|
|
54
|
+
Provides-Extra: flash-attention
|
|
55
|
+
Requires-Dist: flash-attn>=2.6.3; extra == "flash-attention"
|
|
56
|
+
Provides-Extra: openai
|
|
57
|
+
Requires-Dist: openai>=1.41.0; extra == "openai"
|
|
58
|
+
Requires-Dist: tiktoken>=0.8.0; extra == "openai"
|
|
59
|
+
Provides-Extra: model2vec
|
|
60
|
+
Requires-Dist: model2vec>=0.3.0; extra == "model2vec"
|
|
61
|
+
Provides-Extra: pylate
|
|
62
|
+
Requires-Dist: pylate>=1.3.1; python_version < "3.13" and extra == "pylate"
|
|
63
|
+
Provides-Extra: bm25s
|
|
64
|
+
Requires-Dist: bm25s>=0.2.6; extra == "bm25s"
|
|
65
|
+
Requires-Dist: PyStemmer>=2.2.0.3; extra == "bm25s"
|
|
66
|
+
Provides-Extra: gritlm
|
|
67
|
+
Requires-Dist: gritlm>=1.0.2; extra == "gritlm"
|
|
68
|
+
Provides-Extra: xformers
|
|
69
|
+
Requires-Dist: xformers>=0.0.29; extra == "xformers"
|
|
70
|
+
Provides-Extra: blip2
|
|
71
|
+
Requires-Dist: salesforce-lavis>=1.0.2; extra == "blip2"
|
|
72
|
+
Provides-Extra: voyageai
|
|
73
|
+
Requires-Dist: voyageai<2.0.0,>0.3.0; extra == "voyageai"
|
|
74
|
+
Provides-Extra: voyage-v
|
|
75
|
+
Requires-Dist: voyageai<2.0.0,>0.3.0; extra == "voyage-v"
|
|
76
|
+
Requires-Dist: tenacity>9.0.0; extra == "voyage-v"
|
|
77
|
+
Provides-Extra: cohere
|
|
78
|
+
Requires-Dist: cohere==5.14.0; extra == "cohere"
|
|
79
|
+
Provides-Extra: vertexai
|
|
80
|
+
Requires-Dist: vertexai==1.71.1; extra == "vertexai"
|
|
81
|
+
Provides-Extra: llm2vec
|
|
82
|
+
Requires-Dist: llm2vec<0.3.0,>=0.2.3; extra == "llm2vec"
|
|
83
|
+
Provides-Extra: timm
|
|
84
|
+
Requires-Dist: timm<1.1.0,>=1.0.15; extra == "timm"
|
|
85
|
+
Provides-Extra: open-clip-torch
|
|
86
|
+
Requires-Dist: open_clip_torch==2.31.0; extra == "open-clip-torch"
|
|
87
|
+
Provides-Extra: nomic
|
|
88
|
+
Requires-Dist: einops>=0.8.1; extra == "nomic"
|
|
89
|
+
Provides-Extra: ark
|
|
90
|
+
Requires-Dist: volcengine-python-sdk[ark]==3.0.2; extra == "ark"
|
|
91
|
+
Requires-Dist: tiktoken>=0.8.0; extra == "ark"
|
|
92
|
+
Provides-Extra: colpali-engine
|
|
93
|
+
Requires-Dist: colpali_engine>=0.3.12; extra == "colpali-engine"
|
|
94
|
+
Provides-Extra: xet
|
|
95
|
+
Requires-Dist: huggingface_hub>=0.32.0; extra == "xet"
|
|
96
|
+
Provides-Extra: youtu
|
|
97
|
+
Requires-Dist: tencentcloud-sdk-python-common>=3.0.1454; extra == "youtu"
|
|
98
|
+
Requires-Dist: tencentcloud-sdk-python-lkeap>=3.0.1451; extra == "youtu"
|
|
99
|
+
Provides-Extra: llama-embed-nemotron
|
|
100
|
+
Requires-Dist: transformers==4.51.0; extra == "llama-embed-nemotron"
|
|
101
|
+
Provides-Extra: faiss-cpu
|
|
102
|
+
Requires-Dist: faiss-cpu>=1.12.0; extra == "faiss-cpu"
|
|
103
|
+
Dynamic: license-file
|
|
104
|
+
|
|
105
|
+
<h1 align="center">
|
|
106
|
+
<img src="docs/images/logos/mteb_logo/dots-icon.png" alt="MTEB" width="28" style="vertical-align: middle; margin-right: 10px;"/> MTEB
|
|
107
|
+
</h1>
|
|
108
|
+
|
|
109
|
+
<h3 align="center" style="border-bottom: none;">Multimodal toolbox for evaluating embeddings and retrieval systems</h3>
|
|
110
|
+
|
|
111
|
+
<p align="center">
|
|
112
|
+
<a href="https://github.com/embeddings-benchmark/mteb/releases">
|
|
113
|
+
<img alt="GitHub release" src="https://img.shields.io/github/release/embeddings-benchmark/mteb.svg">
|
|
114
|
+
</a>
|
|
115
|
+
<a href="https://github.com/embeddings-benchmark/mteb/blob/master/LICENSE">
|
|
116
|
+
<img alt="License" src="https://img.shields.io/github/license/embeddings-benchmark/mteb.svg?color=green">
|
|
117
|
+
</a>
|
|
118
|
+
<a href="https://pepy.tech/project/mteb">
|
|
119
|
+
<img alt="Downloads" src="https://static.pepy.tech/personalized-badge/mteb?period=total&units=international_system&left_color=grey&right_color=orange&left_text=Downloads">
|
|
120
|
+
</a>
|
|
121
|
+
</p>
|
|
122
|
+
|
|
123
|
+
<h4 align="center">
|
|
124
|
+
<p>
|
|
125
|
+
<a href="https://embeddings-benchmark.github.io/mteb/installation/">Installation</a> |
|
|
126
|
+
<a href="https://embeddings-benchmark.github.io/mteb/">Usage</a> |
|
|
127
|
+
<a href="https://huggingface.co/spaces/mteb/leaderboard">Leaderboard</a> |
|
|
128
|
+
<a href="https://embeddings-benchmark.github.io/mteb/">Documentation</a> |
|
|
129
|
+
<a href="#citing">Citing</a>
|
|
130
|
+
</p>
|
|
131
|
+
</h4>
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
<h3 align="center">
|
|
135
|
+
<a href="https://huggingface.co/spaces/mteb/leaderboard"><img style="float: middle; padding: 10px 10px 10px 10px;" width="60" height="55" src="./docs/images/logos/hf_logo.png" /></a>
|
|
136
|
+
</h3>
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
## Installation
|
|
140
|
+
|
|
141
|
+
You can install mteb simply using pip. For more on installation please see the [documentation](https://embeddings-benchmark.github.io/mteb/installation/).
|
|
142
|
+
|
|
143
|
+
```bash
|
|
144
|
+
pip install mteb
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
## Example Usage
|
|
149
|
+
|
|
150
|
+
Below we present a simple use-case example. For more information, see the [documentation](https://embeddings-benchmark.github.io/mteb/).
|
|
151
|
+
|
|
152
|
+
```python
|
|
153
|
+
import mteb
|
|
154
|
+
from sentence_transformers import SentenceTransformer
|
|
155
|
+
|
|
156
|
+
# Select model
|
|
157
|
+
model_name = "sentence-transformers/all-MiniLM-L6-v2"
|
|
158
|
+
model = mteb.get_model(model_name) # if the model is not implemented in MTEB it will be eq. to SentenceTransformer(model_name)
|
|
159
|
+
|
|
160
|
+
# Select tasks
|
|
161
|
+
tasks = mteb.get_tasks(tasks=["Banking77Classification.v2"])
|
|
162
|
+
|
|
163
|
+
# evaluate
|
|
164
|
+
results = mteb.evaluate(model, tasks=tasks)
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
You can also run it using the CLI:
|
|
168
|
+
|
|
169
|
+
```bash
|
|
170
|
+
mteb run \
|
|
171
|
+
-m sentence-transformers/all-MiniLM-L6-v2 \
|
|
172
|
+
-t "Banking77Classification.v2" \
|
|
173
|
+
--output-folder results
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
For more on how to use the CLI check out the [related documentation](https://embeddings-benchmark.github.io/mteb/usage/cli/).
|
|
177
|
+
|
|
178
|
+
## Overview
|
|
179
|
+
|
|
180
|
+
| Overview | |
|
|
181
|
+
|--------------------------------|--------------------------------------------------------------------------------------|
|
|
182
|
+
| 📈 [Leaderboard] | The interactive leaderboard of the benchmark |
|
|
183
|
+
| **Get Started**. | |
|
|
184
|
+
| 🏃 [Get Started] | Overview of how to use mteb |
|
|
185
|
+
| 🤖 [Defining Models] | How to use existing model and define custom ones |
|
|
186
|
+
| 📋 [Selecting tasks] | How to select tasks, benchmarks, splits etc. |
|
|
187
|
+
| 🏭 [Running Evaluation] | How to run the evaluations, including cache management, speeding up evaluations etc. |
|
|
188
|
+
| 📊 [Loading Results] | How to load and work with existing model results |
|
|
189
|
+
| **Overview**. | |
|
|
190
|
+
| 📋 [Tasks] | Overview of available tasks |
|
|
191
|
+
| 📐 [Benchmarks] | Overview of available benchmarks |
|
|
192
|
+
| 🤖 [Models] | Overview of available Models |
|
|
193
|
+
| **Contributing** | |
|
|
194
|
+
| 🤖 [Adding a model] | How to submit a model to MTEB and to the leaderboard |
|
|
195
|
+
| 👩💻 [Adding a dataset] | How to add a new task/dataset to MTEB |
|
|
196
|
+
| 👩💻 [Adding a benchmark] | How to add a new benchmark to MTEB and to the leaderboard |
|
|
197
|
+
| 🤝 [Contributing] | How to contribute to MTEB and set it up for development |
|
|
198
|
+
|
|
199
|
+
[Get Started]: https://embeddings-benchmark.github.io/mteb/usage/get_started/
|
|
200
|
+
[Defining Models]: https://embeddings-benchmark.github.io/mteb/usage/defining_the_model/
|
|
201
|
+
[Selecting tasks]: https://embeddings-benchmark.github.io/mteb/usage/selecting_tasks/
|
|
202
|
+
[Running Evaluation]: https://embeddings-benchmark.github.io/mteb/usage/running_the_evaluation/
|
|
203
|
+
[Loading Results]: https://embeddings-benchmark.github.io/mteb/usage/loading_results/
|
|
204
|
+
[Tasks]: https://embeddings-benchmark.github.io/mteb/overview/available_tasks/any2anymultilingualretrieval/
|
|
205
|
+
[Benchmarks]: https://embeddings-benchmark.github.io/mteb/overview/available_benchmarks/
|
|
206
|
+
[Models]: https://embeddings-benchmark.github.io/mteb/overview/available_models/text/
|
|
207
|
+
[Contributing]: https://embeddings-benchmark.github.io/mteb/CONTRIBUTING/
|
|
208
|
+
[Adding a model]: https://embeddings-benchmark.github.io/mteb/contributing/adding_a_model/
|
|
209
|
+
[Adding a dataset]: https://embeddings-benchmark.github.io/mteb/contributing/adding_a_dataset/
|
|
210
|
+
[Adding a benchmark]: https://embeddings-benchmark.github.io/mteb/contributing/adding_a_benchmark/
|
|
211
|
+
[Leaderboard]: https://huggingface.co/spaces/mteb/leaderboard
|
|
212
|
+
|
|
213
|
+
## Citing
|
|
214
|
+
|
|
215
|
+
MTEB was introduced in "[MTEB: Massive Text Embedding Benchmark](https://arxiv.org/abs/2210.07316)", and heavily expanded in "[MMTEB: Massive Multilingual Text Embedding Benchmark](https://arxiv.org/abs/2502.13595)". When using `mteb`, we recommend that you cite both articles.
|
|
216
|
+
|
|
217
|
+
<details>
|
|
218
|
+
<summary> Bibtex Citation (click to unfold) </summary>
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
```bibtex
|
|
222
|
+
@article{muennighoff2022mteb,
|
|
223
|
+
author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Loïc and Reimers, Nils},
|
|
224
|
+
title = {MTEB: Massive Text Embedding Benchmark},
|
|
225
|
+
publisher = {arXiv},
|
|
226
|
+
journal={arXiv preprint arXiv:2210.07316},
|
|
227
|
+
year = {2022},
|
|
228
|
+
url = {https://arxiv.org/abs/2210.07316},
|
|
229
|
+
doi = {10.48550/ARXIV.2210.07316},
|
|
230
|
+
}
|
|
231
|
+
|
|
232
|
+
@article{enevoldsen2025mmtebmassivemultilingualtext,
|
|
233
|
+
title={MMTEB: Massive Multilingual Text Embedding Benchmark},
|
|
234
|
+
author={Kenneth Enevoldsen and Isaac Chung and Imene Kerboua and Márton Kardos and Ashwin Mathur and David Stap and Jay Gala and Wissam Siblini and Dominik Krzemiński and Genta Indra Winata and Saba Sturua and Saiteja Utpala and Mathieu Ciancone and Marion Schaeffer and Gabriel Sequeira and Diganta Misra and Shreeya Dhakal and Jonathan Rystrøm and Roman Solomatin and Ömer Çağatan and Akash Kundu and Martin Bernstorff and Shitao Xiao and Akshita Sukhlecha and Bhavish Pahwa and Rafał Poświata and Kranthi Kiran GV and Shawon Ashraf and Daniel Auras and Björn Plüster and Jan Philipp Harries and Loïc Magne and Isabelle Mohr and Mariya Hendriksen and Dawei Zhu and Hippolyte Gisserot-Boukhlef and Tom Aarsen and Jan Kostkan and Konrad Wojtasik and Taemin Lee and Marek Šuppa and Crystina Zhang and Roberta Rocca and Mohammed Hamdy and Andrianos Michail and John Yang and Manuel Faysse and Aleksei Vatolin and Nandan Thakur and Manan Dey and Dipam Vasani and Pranjal Chitale and Simone Tedeschi and Nguyen Tai and Artem Snegirev and Michael Günther and Mengzhou Xia and Weijia Shi and Xing Han Lù and Jordan Clive and Gayatri Krishnakumar and Anna Maksimova and Silvan Wehrli and Maria Tikhonova and Henil Panchal and Aleksandr Abramov and Malte Ostendorff and Zheng Liu and Simon Clematide and Lester James Miranda and Alena Fenogenova and Guangyu Song and Ruqiya Bin Safi and Wen-Ding Li and Alessia Borghini and Federico Cassano and Hongjin Su and Jimmy Lin and Howard Yen and Lasse Hansen and Sara Hooker and Chenghao Xiao and Vaibhav Adlakha and Orion Weller and Siva Reddy and Niklas Muennighoff},
|
|
235
|
+
publisher = {arXiv},
|
|
236
|
+
journal={arXiv preprint arXiv:2502.13595},
|
|
237
|
+
year={2025},
|
|
238
|
+
url={https://arxiv.org/abs/2502.13595},
|
|
239
|
+
doi = {10.48550/arXiv.2502.13595},
|
|
240
|
+
}
|
|
241
|
+
```
|
|
242
|
+
</details>
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
If you use any of the specific benchmarks, we also recommend that you cite the authors of both the benchmark and its tasks:
|
|
246
|
+
|
|
247
|
+
```py
|
|
248
|
+
benchmark = mteb.get_benchmark("MTEB(eng, v2)")
|
|
249
|
+
benchmark.citation # get citation for a specific benchmark
|
|
250
|
+
|
|
251
|
+
# you can also create a table of the tasks for the appendix using:
|
|
252
|
+
benchmark.tasks.to_latex()
|
|
253
|
+
```
|