mteb 2.7.2__py3-none-any.whl → 2.7.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mteb/_create_dataloaders.py +16 -9
- mteb/_evaluators/any_sts_evaluator.py +10 -5
- mteb/_evaluators/clustering_evaluator.py +10 -4
- mteb/_evaluators/evaluator.py +9 -4
- mteb/_evaluators/image/imagetext_pairclassification_evaluator.py +6 -4
- mteb/_evaluators/pair_classification_evaluator.py +10 -5
- mteb/_evaluators/retrieval_evaluator.py +19 -13
- mteb/_evaluators/retrieval_metrics.py +9 -3
- mteb/_evaluators/sklearn_evaluator.py +14 -10
- mteb/_evaluators/text/bitext_mining_evaluator.py +8 -3
- mteb/_evaluators/text/summarization_evaluator.py +8 -4
- mteb/_evaluators/zeroshot_classification_evaluator.py +10 -3
- mteb/_helpful_enum.py +5 -1
- mteb/abstasks/_data_filter/filters.py +8 -2
- mteb/abstasks/_data_filter/task_pipelines.py +7 -2
- mteb/abstasks/_statistics_calculation.py +6 -4
- mteb/abstasks/abstask.py +17 -9
- mteb/abstasks/aggregate_task_metadata.py +20 -9
- mteb/abstasks/aggregated_task.py +15 -8
- mteb/abstasks/classification.py +15 -6
- mteb/abstasks/clustering.py +17 -8
- mteb/abstasks/clustering_legacy.py +14 -6
- mteb/abstasks/image/image_text_pair_classification.py +17 -7
- mteb/abstasks/multilabel_classification.py +11 -5
- mteb/abstasks/pair_classification.py +19 -9
- mteb/abstasks/regression.py +14 -6
- mteb/abstasks/retrieval.py +28 -17
- mteb/abstasks/retrieval_dataset_loaders.py +11 -8
- mteb/abstasks/sts.py +19 -10
- mteb/abstasks/task_metadata.py +17 -8
- mteb/abstasks/text/bitext_mining.py +14 -7
- mteb/abstasks/text/summarization.py +17 -7
- mteb/abstasks/zeroshot_classification.py +15 -7
- mteb/benchmarks/_create_table.py +13 -3
- mteb/benchmarks/benchmark.py +11 -1
- mteb/benchmarks/benchmarks/__init__.py +2 -0
- mteb/benchmarks/benchmarks/benchmarks.py +41 -2
- mteb/benchmarks/benchmarks/rteb_benchmarks.py +20 -9
- mteb/cache.py +10 -5
- mteb/cli/_display_tasks.py +9 -3
- mteb/cli/build_cli.py +5 -2
- mteb/cli/generate_model_card.py +9 -2
- mteb/deprecated_evaluator.py +16 -12
- mteb/descriptive_stats/Retrieval/BrightAopsRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightBiologyLongRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightBiologyRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightEarthScienceLongRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightEarthScienceRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightEconomicsLongRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightEconomicsRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightLeetcodeRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightPonyLongRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightPonyRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightPsychologyLongRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightPsychologyRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightRoboticsLongRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightRoboticsRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightStackoverflowLongRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightStackoverflowRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightSustainableLivingLongRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightSustainableLivingRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightTheoremQAQuestionsRetrieval.json +35 -0
- mteb/descriptive_stats/Retrieval/BrightTheoremQATheoremsRetrieval.json +35 -0
- mteb/evaluate.py +20 -18
- mteb/filter_tasks.py +12 -7
- mteb/get_tasks.py +9 -4
- mteb/languages/language_scripts.py +8 -3
- mteb/leaderboard/app.py +7 -3
- mteb/leaderboard/table.py +7 -2
- mteb/load_results.py +9 -3
- mteb/models/abs_encoder.py +22 -12
- mteb/models/cache_wrappers/cache_backend_protocol.py +5 -3
- mteb/models/cache_wrappers/cache_backends/_hash_utils.py +8 -4
- mteb/models/cache_wrappers/cache_backends/faiss_cache.py +8 -3
- mteb/models/cache_wrappers/cache_wrapper.py +14 -9
- mteb/models/get_model_meta.py +11 -4
- mteb/models/instruct_wrapper.py +13 -5
- mteb/models/model_implementations/align_models.py +10 -4
- mteb/models/model_implementations/amazon_models.py +1 -0
- mteb/models/model_implementations/andersborges.py +2 -0
- mteb/models/model_implementations/ara_models.py +1 -0
- mteb/models/model_implementations/arctic_models.py +8 -0
- mteb/models/model_implementations/b1ade_models.py +1 -0
- mteb/models/model_implementations/bedrock_models.py +20 -6
- mteb/models/model_implementations/bge_models.py +40 -1
- mteb/models/model_implementations/bica_model.py +1 -0
- mteb/models/model_implementations/blip2_models.py +11 -4
- mteb/models/model_implementations/blip_models.py +17 -4
- mteb/models/model_implementations/bm25.py +22 -14
- mteb/models/model_implementations/bmretriever_models.py +10 -2
- mteb/models/model_implementations/cadet_models.py +1 -0
- mteb/models/model_implementations/cde_models.py +11 -5
- mteb/models/model_implementations/clip_models.py +12 -4
- mteb/models/model_implementations/clips_models.py +3 -0
- mteb/models/model_implementations/codefuse_models.py +5 -0
- mteb/models/model_implementations/codesage_models.py +3 -0
- mteb/models/model_implementations/cohere_models.py +14 -4
- mteb/models/model_implementations/cohere_v.py +14 -4
- mteb/models/model_implementations/colpali_models.py +7 -3
- mteb/models/model_implementations/colqwen_models.py +17 -31
- mteb/models/model_implementations/colsmol_models.py +3 -1
- mteb/models/model_implementations/conan_models.py +11 -4
- mteb/models/model_implementations/dino_models.py +28 -4
- mteb/models/model_implementations/e5_instruct.py +4 -0
- mteb/models/model_implementations/e5_models.py +9 -0
- mteb/models/model_implementations/e5_v.py +10 -4
- mteb/models/model_implementations/eagerworks_models.py +11 -4
- mteb/models/model_implementations/emillykkejensen_models.py +3 -0
- mteb/models/model_implementations/en_code_retriever.py +1 -0
- mteb/models/model_implementations/euler_models.py +1 -0
- mteb/models/model_implementations/evaclip_models.py +13 -4
- mteb/models/model_implementations/fa_models.py +9 -0
- mteb/models/model_implementations/facebookai.py +2 -0
- mteb/models/model_implementations/geogpt_models.py +1 -0
- mteb/models/model_implementations/gme_v_models.py +7 -3
- mteb/models/model_implementations/google_models.py +15 -4
- mteb/models/model_implementations/granite_vision_embedding_models.py +7 -5
- mteb/models/model_implementations/gritlm_models.py +2 -0
- mteb/models/model_implementations/gte_models.py +9 -0
- mteb/models/model_implementations/hinvec_models.py +6 -1
- mteb/models/model_implementations/human.py +1 -0
- mteb/models/model_implementations/ibm_granite_models.py +6 -0
- mteb/models/model_implementations/inf_models.py +2 -0
- mteb/models/model_implementations/jasper_models.py +14 -5
- mteb/models/model_implementations/jina_clip.py +10 -4
- mteb/models/model_implementations/jina_models.py +17 -5
- mteb/models/model_implementations/kalm_models.py +24 -12
- mteb/models/model_implementations/kblab.py +1 -0
- mteb/models/model_implementations/kennethenevoldsen_models.py +2 -0
- mteb/models/model_implementations/kfst.py +1 -0
- mteb/models/model_implementations/kowshik24_models.py +1 -0
- mteb/models/model_implementations/lens_models.py +2 -0
- mteb/models/model_implementations/lgai_embedding_models.py +1 -0
- mteb/models/model_implementations/linq_models.py +7 -1
- mteb/models/model_implementations/listconranker.py +10 -4
- mteb/models/model_implementations/llm2clip_models.py +12 -4
- mteb/models/model_implementations/llm2vec_models.py +20 -6
- mteb/models/model_implementations/mcinext_models.py +8 -2
- mteb/models/model_implementations/mdbr_models.py +2 -0
- mteb/models/model_implementations/misc_models.py +63 -0
- mteb/models/model_implementations/mixedbread_ai_models.py +3 -0
- mteb/models/model_implementations/mme5_models.py +2 -1
- mteb/models/model_implementations/moco_models.py +11 -4
- mteb/models/model_implementations/mod_models.py +2 -1
- mteb/models/model_implementations/model2vec_models.py +23 -4
- mteb/models/model_implementations/moka_models.py +3 -0
- mteb/models/model_implementations/nbailab.py +3 -0
- mteb/models/model_implementations/no_instruct_sentence_models.py +13 -5
- mteb/models/model_implementations/nomic_models.py +16 -4
- mteb/models/model_implementations/nomic_models_vision.py +5 -3
- mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py +9 -3
- mteb/models/model_implementations/nvidia_models.py +15 -4
- mteb/models/model_implementations/octen_models.py +3 -1
- mteb/models/model_implementations/openai_models.py +14 -4
- mteb/models/model_implementations/openclip_models.py +17 -4
- mteb/models/model_implementations/opensearch_neural_sparse_models.py +15 -4
- mteb/models/model_implementations/ops_moa_models.py +9 -2
- mteb/models/model_implementations/ordalietech_solon_embeddings_mini_beta_1_1.py +1 -0
- mteb/models/model_implementations/pawan_models.py +1 -0
- mteb/models/model_implementations/piccolo_models.py +2 -0
- mteb/models/model_implementations/promptriever_models.py +16 -6
- mteb/models/model_implementations/pylate_models.py +22 -13
- mteb/models/model_implementations/qodo_models.py +2 -0
- mteb/models/model_implementations/qtack_models.py +1 -0
- mteb/models/model_implementations/qwen3_models.py +11 -1
- mteb/models/model_implementations/qzhou_models.py +2 -0
- mteb/models/model_implementations/random_baseline.py +4 -3
- mteb/models/model_implementations/rasgaard_models.py +1 -0
- mteb/models/model_implementations/reasonir_model.py +65 -0
- mteb/models/model_implementations/repllama_models.py +15 -6
- mteb/models/model_implementations/rerankers_custom.py +13 -4
- mteb/models/model_implementations/rerankers_monot5_based.py +24 -4
- mteb/models/model_implementations/richinfoai_models.py +1 -0
- mteb/models/model_implementations/ru_sentence_models.py +20 -0
- mteb/models/model_implementations/ruri_models.py +10 -0
- mteb/models/model_implementations/salesforce_models.py +10 -1
- mteb/models/model_implementations/samilpwc_models.py +1 -0
- mteb/models/model_implementations/sarashina_embedding_models.py +2 -0
- mteb/models/model_implementations/searchmap_models.py +1 -0
- mteb/models/model_implementations/seed_1_6_embedding_models.py +5 -2
- mteb/models/model_implementations/seed_1_6_embedding_models_1215.py +6 -2
- mteb/models/model_implementations/seed_models.py +2 -1
- mteb/models/model_implementations/sentence_transformers_models.py +18 -0
- mteb/models/model_implementations/shuu_model.py +1 -0
- mteb/models/model_implementations/siglip_models.py +19 -4
- mteb/models/model_implementations/slm_models.py +7 -4
- mteb/models/model_implementations/sonar_models.py +2 -1
- mteb/models/model_implementations/spartan8806_atles_champion.py +1 -0
- mteb/models/model_implementations/stella_models.py +6 -0
- mteb/models/model_implementations/tarka_models.py +2 -0
- mteb/models/model_implementations/text2vec_models.py +3 -0
- mteb/models/model_implementations/ua_sentence_models.py +1 -0
- mteb/models/model_implementations/uae_models.py +10 -4
- mteb/models/model_implementations/vdr_models.py +8 -1
- mteb/models/model_implementations/vi_vn_models.py +6 -0
- mteb/models/model_implementations/vista_models.py +11 -4
- mteb/models/model_implementations/vlm2vec_models.py +11 -4
- mteb/models/model_implementations/voyage_models.py +25 -4
- mteb/models/model_implementations/voyage_v.py +11 -6
- mteb/models/model_implementations/xyz_models.py +1 -0
- mteb/models/model_implementations/youtu_models.py +1 -0
- mteb/models/model_implementations/yuan_models.py +1 -0
- mteb/models/model_implementations/yuan_models_en.py +2 -1
- mteb/models/model_meta.py +47 -9
- mteb/models/models_protocols.py +19 -18
- mteb/models/search_encoder_index/search_backend_protocol.py +7 -3
- mteb/models/search_encoder_index/search_indexes/faiss_search_index.py +12 -4
- mteb/models/search_wrappers.py +19 -12
- mteb/models/sentence_transformer_wrapper.py +4 -3
- mteb/models/vllm_wrapper.py +8 -6
- mteb/results/benchmark_results.py +22 -17
- mteb/results/model_result.py +21 -15
- mteb/results/task_result.py +15 -9
- mteb/similarity_functions.py +8 -2
- mteb/tasks/aggregated_tasks/eng/cqadupstack_retrieval.py +3 -3
- mteb/tasks/aggregated_tasks/eng/sts17_multilingual_visual_sts_eng.py +3 -3
- mteb/tasks/aggregated_tasks/eng/sts_benchmark_multilingual_visual_sts_eng.py +3 -3
- mteb/tasks/aggregated_tasks/fas/cqadupstack_retrieval_fa.py +3 -3
- mteb/tasks/aggregated_tasks/fas/syn_per_chatbot_conv_sa_classification.py +3 -3
- mteb/tasks/aggregated_tasks/multilingual/sts17_multilingual_vision_sts.py +3 -3
- mteb/tasks/aggregated_tasks/multilingual/sts_benchmark_multilingual_visual_sts.py +3 -3
- mteb/tasks/aggregated_tasks/nld/cqadupstack_nl_retrieval.py +3 -3
- mteb/tasks/aggregated_tasks/pol/cqadupstack_retrieval_pl.py +3 -3
- mteb/tasks/clustering/nob/snl_clustering.py +7 -2
- mteb/tasks/clustering/nob/vg_clustering.py +7 -2
- mteb/tasks/retrieval/eng/__init__.py +42 -0
- mteb/tasks/retrieval/eng/bright_retrieval.py +9 -1
- mteb/tasks/retrieval/eng/bright_v1_1_retrieval.py +968 -0
- mteb/tasks/retrieval/eng/limit_retrieval.py +6 -1
- mteb/tasks/retrieval/multilingual/ru_sci_bench_retrieval.py +3 -3
- mteb/types/_encoder_io.py +1 -1
- mteb/types/statistics.py +9 -2
- {mteb-2.7.2.dist-info → mteb-2.7.4.dist-info}/METADATA +1 -1
- {mteb-2.7.2.dist-info → mteb-2.7.4.dist-info}/RECORD +238 -217
- {mteb-2.7.2.dist-info → mteb-2.7.4.dist-info}/WHEEL +0 -0
- {mteb-2.7.2.dist-info → mteb-2.7.4.dist-info}/entry_points.txt +0 -0
- {mteb-2.7.2.dist-info → mteb-2.7.4.dist-info}/licenses/LICENSE +0 -0
- {mteb-2.7.2.dist-info → mteb-2.7.4.dist-info}/top_level.txt +0 -0
mteb/benchmarks/benchmark.py
CHANGED
@@ -123,9 +123,19 @@ class RtebBenchmark(Benchmark):
             _create_summary_table_mean_public_private,
         )

-        joint_table = _create_summary_table_mean_public_private(
+        joint_table = _create_summary_table_mean_public_private(
+            benchmark_results, exclude_private_from_borda=True
+        )
+        # issue 3902: temporary remove the private column from RTEB summary table
+        if "Mean (Private)" in joint_table.columns:
+            joint_table = joint_table.drop(columns=["Mean (Private)"])
         # For RTEB: all tasks are Retrieval type, so Retrieval column = Mean (Task)
+        # but due to 3902, if Private column existed, Mean (Task) was the mean of Public and Private so instead we drop Mean (Task) and rename Mean (Public) to Mean (Task)
         joint_table = joint_table.rename(columns={"Retrieval": "Mean (Task)"})
+        if "Mean (Task)" in joint_table.columns:
+            joint_table = joint_table.drop(columns=["Mean (Task)"])
+        joint_table = joint_table.rename(columns={"Mean (Public)": "Mean (Task)"})
+
         return joint_table

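The net effect of this patch is a summary table with a single "Mean (Task)" column derived from the public mean. A minimal sketch of the column transformation on a toy pandas frame (the scores and table shape are illustrative, not mteb's actual output):

import pandas as pd

# Hypothetical joint table as built before the patch: separate public and
# private means plus a per-task-type "Retrieval" column.
joint_table = pd.DataFrame(
    {
        "Model": ["model-a", "model-b"],
        "Mean (Public)": [61.2, 58.9],
        "Mean (Private)": [55.4, 57.1],
        "Retrieval": [58.3, 58.0],
    }
)

# The same column surgery the patched RtebBenchmark summary applies (issue 3902):
if "Mean (Private)" in joint_table.columns:
    joint_table = joint_table.drop(columns=["Mean (Private)"])
joint_table = joint_table.rename(columns={"Retrieval": "Mean (Task)"})
if "Mean (Task)" in joint_table.columns:
    joint_table = joint_table.drop(columns=["Mean (Task)"])
joint_table = joint_table.rename(columns={"Mean (Public)": "Mean (Task)"})

print(list(joint_table.columns))  # ['Model', 'Mean (Task)']

Only the public mean survives, relabelled as "Mean (Task)".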
mteb/benchmarks/benchmarks/__init__.py
CHANGED

@@ -3,6 +3,7 @@ from mteb.benchmarks.benchmarks.benchmarks import (
     BEIR_NL,
     BRIGHT,
     BRIGHT_LONG,
+    BRIGHT_V1_1,
     BUILT_MTEB,
     C_MTEB,
     CHEMTEB,
@@ -69,6 +70,7 @@ __all__ = [
     "BEIR_NL",
     "BRIGHT",
     "BRIGHT_LONG",
+    "BRIGHT_V1_1",
     "BUILT_MTEB",
     "CHEMTEB",
     "CHEMTEB_V1_1",
mteb/benchmarks/benchmarks/benchmarks.py
CHANGED

@@ -1330,6 +1330,46 @@ This is the long version of the benchmark, which only filter longer documents.
 """,
 )

+BRIGHT_V1_1 = Benchmark(
+    name="BRIGHT(v1.1)",
+    display_name="Reasoning Retrieval",
+    tasks=get_tasks(
+        tasks=[
+            "BrightBiologyRetrieval",
+            "BrightEarthScienceRetrieval",
+            "BrightEconomicsRetrieval",
+            "BrightPsychologyRetrieval",
+            "BrightRoboticsRetrieval",
+            "BrightStackoverflowRetrieval",
+            "BrightSustainableLivingRetrieval",
+            "BrightPonyRetrieval",
+            "BrightLeetcodeRetrieval",
+            "BrightAopsRetrieval",
+            "BrightTheoremQATheoremsRetrieval",
+            "BrightTheoremQAQuestionsRetrieval",
+            "BrightBiologyLongRetrieval",
+            "BrightEarthScienceLongRetrieval",
+            "BrightEconomicsLongRetrieval",
+            "BrightPsychologyLongRetrieval",
+            "BrightRoboticsLongRetrieval",
+            "BrightStackoverflowLongRetrieval",
+            "BrightSustainableLivingLongRetrieval",
+            "BrightPonyLongRetrieval",
+        ],
+    ),
+    description="v1.1 refactors the BRIGHT into a different tasks and added prompt to individual tasks.",
+    reference="https://brightbenchmark.github.io/",
+    citation=r"""
+@article{su2024bright,
+  author = {Su, Hongjin and Yen, Howard and Xia, Mengzhou and Shi, Weijia and Muennighoff, Niklas and Wang, Han-yu and Liu, Haisu and Shi, Quan and Siegel, Zachary S and Tang, Michael and others},
+  journal = {arXiv preprint arXiv:2407.12883},
+  title = {Bright: A realistic and challenging benchmark for reasoning-intensive retrieval},
+  year = {2024},
+}
+""",
+)
+
+
 CODE_RAG = Benchmark(
     name="CodeRAG",
     tasks=get_tasks(
@@ -1781,8 +1821,7 @@ BEIR_NL = Benchmark(
             "TRECCOVID-NL",
         ],
     ),
-    description="BEIR-NL is a Dutch adaptation of the publicly available BEIR benchmark, created through automated "
-    "translation.",
+    description="BEIR-NL is a Dutch adaptation of the publicly available BEIR benchmark, created through automated translation.",
     reference="https://arxiv.org/abs/2412.08329",
     contacts=["nikolay-banar"],
     citation=r"""
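With the benchmark defined above and re-exported from `mteb/benchmarks/benchmarks/__init__.py`, it should be reachable by name through mteb's top-level helpers. A minimal usage sketch, assuming the standard v2 API surface (`mteb.get_benchmark`, `mteb.get_model`, `mteb.evaluate`); the model is an arbitrary example:

import mteb

# Look up the newly registered benchmark by the name given in the diff above.
benchmark = mteb.get_benchmark("BRIGHT(v1.1)")
print(len(benchmark.tasks))  # expected: the 20 Bright* retrieval tasks

# Evaluate any registered embedding model on those tasks.
model = mteb.get_model("sentence-transformers/all-MiniLM-L6-v2")
results = mteb.evaluate(model, tasks=benchmark.tasks)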
mteb/benchmarks/benchmarks/rteb_benchmarks.py
CHANGED

@@ -10,6 +10,8 @@ RTEB_CITATION = r"""@article{rteb2025,
   year = {2025},
 }"""

+removal_note = "\n\nNote: We have temporarily removed the 'Private' column to read more about this decision out the [announcement](https://github.com/embeddings-benchmark/mteb/issues/3934)."
+
 RTEB_MAIN = RtebBenchmark(
     name="RTEB(beta)",
     display_name="RTEB Multilingual",
@@ -48,7 +50,8 @@ RTEB_MAIN = RtebBenchmark(
             "JapaneseLegal1Retrieval",
         ],
     ),
-    description="RTEB (ReTrieval Embedding Benchmark) is a comprehensive benchmark for evaluating text retrieval models across multiple specialized domains including legal, finance, code, and healthcare. It contains diverse retrieval tasks designed to test models' ability to understand domain-specific terminology and retrieve relevant documents in specialized contexts across multiple languages. The dataset includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    description="RTEB (ReTrieval Embedding Benchmark) is a comprehensive benchmark for evaluating text retrieval models across multiple specialized domains including legal, finance, code, and healthcare. It contains diverse retrieval tasks designed to test models' ability to understand domain-specific terminology and retrieve relevant documents in specialized contexts across multiple languages. The dataset includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    + removal_note,
     citation=RTEB_CITATION,
     contacts=["fzowl"],
 )
@@ -83,7 +86,8 @@ RTEB_ENGLISH = RtebBenchmark(
         ],
         languages=["eng"],
     ),
-    description="RTEB English is a subset of RTEB containing retrieval tasks in English across legal, finance, code, and healthcare domains. Includes diverse tasks covering specialized domains such as healthcare and finance. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    description="RTEB English is a subset of RTEB containing retrieval tasks in English across legal, finance, code, and healthcare domains. Includes diverse tasks covering specialized domains such as healthcare and finance. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    + removal_note,
     citation=RTEB_CITATION,
     contacts=["fzowl"],
 )
@@ -101,7 +105,8 @@ RTEB_FRENCH = RtebBenchmark(
         ],
         languages=["fra"],
     ),
-    description="RTEB French is a subset of RTEB containing retrieval tasks in French across legal and general knowledge domains. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    description="RTEB French is a subset of RTEB containing retrieval tasks in French across legal and general knowledge domains. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    + removal_note,
     citation=RTEB_CITATION,
     contacts=["fzowl"],
 )
@@ -119,7 +124,8 @@ RTEB_GERMAN = RtebBenchmark(
             "GermanLegal1Retrieval",
         ],
     ),
-    description="RTEB German is a subset of RTEB containing retrieval tasks in German across legal, healthcare, and business domains. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    description="RTEB German is a subset of RTEB containing retrieval tasks in German across legal, healthcare, and business domains. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    + removal_note,
     citation=RTEB_CITATION,
     contacts=["fzowl"],
 )
@@ -135,7 +141,8 @@ RTEB_JAPANESE = RtebBenchmark(
             "JapaneseLegal1Retrieval",
         ],
     ),
-    description="RTEB Japanese is a subset of RTEB containing retrieval tasks in Japanese across legal and code domains. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    description="RTEB Japanese is a subset of RTEB containing retrieval tasks in Japanese across legal and code domains. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    + removal_note,
     citation=RTEB_CITATION,
     contacts=["fzowl"],
 )
@@ -156,7 +163,8 @@ RTEB_FINANCE = RtebBenchmark(
             "EnglishFinance4Retrieval",
         ],
     ),
-    description="RTEB Finance is a subset of RTEB containing retrieval tasks specifically focused on financial domain including finance benchmarks, Q&A, financial document retrieval, and corporate governance. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    description="RTEB Finance is a subset of RTEB containing retrieval tasks specifically focused on financial domain including finance benchmarks, Q&A, financial document retrieval, and corporate governance. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    + removal_note,
     citation=RTEB_CITATION,
     contacts=["fzowl"],
 )
@@ -177,7 +185,8 @@ RTEB_LEGAL = RtebBenchmark(
             "JapaneseLegal1Retrieval",
         ],
     ),
-    description="RTEB Legal is a subset of RTEB containing retrieval tasks specifically focused on legal domain including case documents, statutes, legal summarization, and multilingual legal Q&A. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    description="RTEB Legal is a subset of RTEB containing retrieval tasks specifically focused on legal domain including case documents, statutes, legal summarization, and multilingual legal Q&A. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    + removal_note,
     citation=RTEB_CITATION,
     contacts=["fzowl"],
 )
@@ -199,7 +208,8 @@ RTEB_CODE = RtebBenchmark(
             "JapaneseCode1Retrieval",
         ],
     ),
-    description="RTEB Code is a subset of RTEB containing retrieval tasks specifically focused on programming and code domains including algorithmic problems, data science tasks, code evaluation, SQL retrieval, and multilingual code retrieval. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    description="RTEB Code is a subset of RTEB containing retrieval tasks specifically focused on programming and code domains including algorithmic problems, data science tasks, code evaluation, SQL retrieval, and multilingual code retrieval. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    + removal_note,
     citation=RTEB_CITATION,
     contacts=["fzowl"],
 )
@@ -217,7 +227,8 @@ RTEB_HEALTHCARE = RtebBenchmark(
             "GermanHealthcare1Retrieval",
         ],
     ),
-    description="RTEB Healthcare is a subset of RTEB containing retrieval tasks specifically focused on healthcare and medical domains including medical Q&A, healthcare information retrieval, cross-lingual medical retrieval, and multilingual medical consultation. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    description="RTEB Healthcare is a subset of RTEB containing retrieval tasks specifically focused on healthcare and medical domains including medical Q&A, healthcare information retrieval, cross-lingual medical retrieval, and multilingual medical consultation. The benchmark includes both open and closed datasets, providing a robust evaluation framework for real-world applications. To submit results on private tasks, please create [open an issue](https://github.com/embeddings-benchmark/mteb/issues)."
+    + removal_note,
     citation=RTEB_CITATION,
     contacts=["fzowl"],
 )
mteb/cache.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import gzip
 import io
 import json
@@ -7,9 +9,8 @@
 import subprocess
 import warnings
 from collections import defaultdict
-from collections.abc import Iterable, Sequence
 from pathlib import Path
-from typing import cast
+from typing import TYPE_CHECKING, cast

 import requests
 from pydantic import ValidationError
@@ -19,7 +20,11 @@ from mteb.abstasks import AbsTask
 from mteb.benchmarks.benchmark import Benchmark
 from mteb.models import ModelMeta
 from mteb.results import BenchmarkResults, ModelResult, TaskResult
-
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable, Sequence
+
+    from mteb.types import ModelName, Revision

 logger = logging.getLogger(__name__)

@@ -584,7 +589,7 @@ class ResultCache:

         first_model = next(iter(models))
         if isinstance(first_model, ModelMeta):
-            models = cast(Iterable[ModelMeta], models)
+            models = cast("Iterable[ModelMeta]", models)
             name_and_revision = {
                 (m.model_name_as_path(), m.revision or "no_revision_available")
                 for m in models
@@ -595,7 +600,7 @@ class ResultCache:
                 if (p.parent.parent.name, p.parent.name) in name_and_revision
             ]

-        str_models = cast(Sequence[str], models)
+        str_models = cast("Sequence[str]", models)
         model_names = {m.replace("/", "__").replace(" ", "_") for m in str_models}
         return [p for p in paths if p.parent.parent.name in model_names]

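This file and the four modules below all apply the same deferred-import refactor: with `from __future__ import annotations` (PEP 563), annotations are never evaluated at runtime, so typing-only imports can move under `typing.TYPE_CHECKING` (false at runtime, true for type checkers), and `typing.cast` targets become string literals so they need no runtime name either. A self-contained sketch of the pattern with illustrative names (not mteb's code):

from __future__ import annotations  # annotations stay as unevaluated strings

from typing import TYPE_CHECKING, cast

if TYPE_CHECKING:
    # Seen only by type checkers; never imported at runtime.
    from collections.abc import Sequence


def first_upper(items: Sequence[str]) -> str:  # OK: annotation not evaluated
    obj: object = items[0]
    # The string form of cast() avoids needing the type at runtime too.
    return cast("str", obj).upper()


print(first_upper(["hello"]))  # HELLO

The payoff is cheaper import time and fewer circular-import hazards, with no change in behavior.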
mteb/cli/_display_tasks.py
CHANGED
@@ -1,9 +1,15 @@
-from collections.abc import Iterable, Sequence
+from __future__ import annotations
+
+from typing import TYPE_CHECKING

-from mteb.abstasks import AbsTask
-from mteb.benchmarks import Benchmark
 from mteb.get_tasks import MTEBTasks

+if TYPE_CHECKING:
+    from collections.abc import Iterable, Sequence
+
+    from mteb.abstasks import AbsTask
+    from mteb.benchmarks import Benchmark
+

 def _display_benchmarks(benchmarks: Sequence[Benchmark]) -> None:
     """Get all benchmarks available in the MTEB."""
mteb/cli/build_cli.py
CHANGED
@@ -3,17 +3,20 @@ import logging
 import os
 import warnings
 from pathlib import Path
+from typing import TYPE_CHECKING

 import torch
 from rich.logging import RichHandler

 import mteb
-from mteb.abstasks.abstask import AbsTask
 from mteb.cache import ResultCache
 from mteb.cli._display_tasks import _display_benchmarks, _display_tasks
 from mteb.cli.generate_model_card import generate_model_card
 from mteb.evaluate import OverwriteStrategy
-
+
+if TYPE_CHECKING:
+    from mteb.abstasks.abstask import AbsTask
+    from mteb.types import EncodeKwargs

 logger = logging.getLogger(__name__)

mteb/cli/generate_model_card.py
CHANGED
@@ -1,14 +1,21 @@
+from __future__ import annotations
+
 import logging
 import warnings
-from collections.abc import Sequence
 from pathlib import Path
+from typing import TYPE_CHECKING

 from huggingface_hub import ModelCard, ModelCardData, repo_exists

 from mteb.abstasks.abstask import AbsTask
-from mteb.benchmarks.benchmark import Benchmark
 from mteb.cache import ResultCache

+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+    from mteb.abstasks.abstask import AbsTask
+    from mteb.benchmarks.benchmark import Benchmark
+
 logger = logging.getLogger(__name__)

mteb/deprecated_evaluator.py
CHANGED
@@ -6,7 +6,6 @@ import os
 import sys
 import traceback
 import warnings
-from collections.abc import Iterable, Sequence
 from copy import deepcopy
 from datetime import datetime
 from itertools import chain
@@ -18,26 +17,31 @@ import datasets

 import mteb
 from mteb.abstasks import AbsTask
-from mteb.abstasks.aggregated_task import AbsTaskAggregate
-from mteb.abstasks.task_metadata import TaskCategory, TaskType
 from mteb.benchmarks import Benchmark
 from mteb.models import (
     CrossEncoderWrapper,
     ModelMeta,
-    MTEBModels,
     SentenceTransformerEncoderWrapper,
 )
 from mteb.results import TaskResult
-
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable, Sequence
+
+    from sentence_transformers import CrossEncoder, SentenceTransformer
+
+    from mteb.abstasks.aggregated_task import AbsTaskAggregate
+    from mteb.abstasks.task_metadata import TaskCategory, TaskType
+    from mteb.models import (
+        MTEBModels,
+    )
+    from mteb.types import EncodeKwargs, ScoresDict

 if sys.version_info >= (3, 13):
     from warnings import deprecated
 else:
     from typing_extensions import deprecated

-if TYPE_CHECKING:
-    from sentence_transformers import CrossEncoder, SentenceTransformer
-
 logger = logging.getLogger(__name__)


@@ -66,9 +70,9 @@ class MTEB:
         """
         if isinstance(next(iter(tasks)), Benchmark):
             self.benchmarks = tasks
-            self.tasks = list(chain.from_iterable(cast(Iterable[Benchmark], tasks)))
+            self.tasks = list(chain.from_iterable(cast("Iterable[Benchmark]", tasks)))
         elif isinstance(next(iter(tasks)), AbsTask):
-            self.tasks = list(cast(Iterable[AbsTask], tasks))
+            self.tasks = list(cast("Iterable[AbsTask]", tasks))

         self.err_logs_path = Path(err_logs_path)
         self._last_evaluated_splits: dict[str, list[str]] = {}
@@ -313,7 +317,7 @@ class MTEB:
         elif isinstance(model, CrossEncoder):
             mteb_model = CrossEncoderWrapper(model)
         else:
-            mteb_model = cast(MTEBModels, model)
+            mteb_model = cast("MTEBModels", model)

         meta = self.create_model_meta(mteb_model)
         output_path = self._create_output_folder(meta, output_folder)
@@ -346,7 +350,7 @@ class MTEB:
         )

         if task.is_aggregate:
-            aggregated_task = cast(AbsTaskAggregate, task)
+            aggregated_task = cast("AbsTaskAggregate", task)
             self_ = MTEB(tasks=aggregated_task.metadata.tasks)
             aggregated_task_results = self_.run(
                 mteb_model,
mteb/descriptive_stats/Retrieval/BrightAopsRetrieval.json
ADDED

@@ -0,0 +1,35 @@
+{
+    "standard": {
+        "num_samples": 188113,
+        "number_of_characters": 141769714,
+        "documents_text_statistics": {
+            "total_text_length": 141734227,
+            "min_text_length": 58,
+            "average_text_length": 753.8974425803981,
+            "max_text_length": 7334,
+            "unique_texts": 176508
+        },
+        "documents_image_statistics": null,
+        "queries_text_statistics": {
+            "total_text_length": 35487,
+            "min_text_length": 85,
+            "average_text_length": 319.7027027027027,
+            "max_text_length": 1167,
+            "unique_texts": 111
+        },
+        "queries_image_statistics": null,
+        "relevant_docs_statistics": {
+            "num_relevant_docs": 524,
+            "min_relevant_docs_per_query": 1,
+            "average_relevant_docs_per_query": 4.7207207207207205,
+            "max_relevant_docs_per_query": 8,
+            "unique_relevant_docs": 111
+        },
+        "top_ranked_statistics": {
+            "num_top_ranked": 20264921,
+            "min_top_ranked_per_query": 176954,
+            "average_top_ranked_per_query": 182566.85585585586,
+            "max_top_ranked_per_query": 186176
+        }
+    }
+}
mteb/descriptive_stats/Retrieval/BrightBiologyLongRetrieval.json
ADDED

@@ -0,0 +1,35 @@
+{
+    "long": {
+        "num_samples": 627,
+        "number_of_characters": 19398082,
+        "documents_text_statistics": {
+            "total_text_length": 19344209,
+            "min_text_length": 142,
+            "average_text_length": 36916.42938931298,
+            "max_text_length": 1324201,
+            "unique_texts": 498
+        },
+        "documents_image_statistics": null,
+        "queries_text_statistics": {
+            "total_text_length": 53873,
+            "min_text_length": 89,
+            "average_text_length": 523.0388349514564,
+            "max_text_length": 2195,
+            "unique_texts": 103
+        },
+        "queries_image_statistics": null,
+        "relevant_docs_statistics": {
+            "num_relevant_docs": 134,
+            "min_relevant_docs_per_query": 1,
+            "average_relevant_docs_per_query": 1.3009708737864079,
+            "max_relevant_docs_per_query": 4,
+            "unique_relevant_docs": 134
+        },
+        "top_ranked_statistics": {
+            "num_top_ranked": 53972,
+            "min_top_ranked_per_query": 524,
+            "average_top_ranked_per_query": 524.0,
+            "max_top_ranked_per_query": 524
+        }
+    }
+}
mteb/descriptive_stats/Retrieval/BrightBiologyRetrieval.json
ADDED

@@ -0,0 +1,35 @@
+{
+    "standard": {
+        "num_samples": 57462,
+        "number_of_characters": 18936054,
+        "documents_text_statistics": {
+            "total_text_length": 18882181,
+            "min_text_length": 1,
+            "average_text_length": 329.192994996426,
+            "max_text_length": 31130,
+            "unique_texts": 49434
+        },
+        "documents_image_statistics": null,
+        "queries_text_statistics": {
+            "total_text_length": 53873,
+            "min_text_length": 89,
+            "average_text_length": 523.0388349514564,
+            "max_text_length": 2195,
+            "unique_texts": 103
+        },
+        "queries_image_statistics": null,
+        "relevant_docs_statistics": {
+            "num_relevant_docs": 374,
+            "min_relevant_docs_per_query": 1,
+            "average_relevant_docs_per_query": 3.6310679611650487,
+            "max_relevant_docs_per_query": 19,
+            "unique_relevant_docs": 374
+        },
+        "top_ranked_statistics": {
+            "num_top_ranked": 5907977,
+            "min_top_ranked_per_query": 57359,
+            "average_top_ranked_per_query": 57359.0,
+            "max_top_ranked_per_query": 57359
+        }
+    }
+}
mteb/descriptive_stats/Retrieval/BrightEarthScienceLongRetrieval.json
ADDED

@@ -0,0 +1,35 @@
+{
+    "long": {
+        "num_samples": 717,
+        "number_of_characters": 41696684,
+        "documents_text_statistics": {
+            "total_text_length": 41641374,
+            "min_text_length": 28,
+            "average_text_length": 69286.81198003328,
+            "max_text_length": 2627262,
+            "unique_texts": 587
+        },
+        "documents_image_statistics": null,
+        "queries_text_statistics": {
+            "total_text_length": 55310,
+            "min_text_length": 83,
+            "average_text_length": 476.8103448275862,
+            "max_text_length": 1565,
+            "unique_texts": 116
+        },
+        "queries_image_statistics": null,
+        "relevant_docs_statistics": {
+            "num_relevant_docs": 187,
+            "min_relevant_docs_per_query": 1,
+            "average_relevant_docs_per_query": 1.6120689655172413,
+            "max_relevant_docs_per_query": 4,
+            "unique_relevant_docs": 187
+        },
+        "top_ranked_statistics": {
+            "num_top_ranked": 69716,
+            "min_top_ranked_per_query": 601,
+            "average_top_ranked_per_query": 601.0,
+            "max_top_ranked_per_query": 601
+        }
+    }
+}
mteb/descriptive_stats/Retrieval/BrightEarthScienceRetrieval.json
ADDED

@@ -0,0 +1,35 @@
+{
+    "standard": {
+        "num_samples": 121365,
+        "number_of_characters": 40478259,
+        "documents_text_statistics": {
+            "total_text_length": 40422949,
+            "min_text_length": 1,
+            "average_text_length": 333.3878959826473,
+            "max_text_length": 233622,
+            "unique_texts": 117633
+        },
+        "documents_image_statistics": null,
+        "queries_text_statistics": {
+            "total_text_length": 55310,
+            "min_text_length": 83,
+            "average_text_length": 476.8103448275862,
+            "max_text_length": 1565,
+            "unique_texts": 116
+        },
+        "queries_image_statistics": null,
+        "relevant_docs_statistics": {
+            "num_relevant_docs": 609,
+            "min_relevant_docs_per_query": 1,
+            "average_relevant_docs_per_query": 5.25,
+            "max_relevant_docs_per_query": 23,
+            "unique_relevant_docs": 609
+        },
+        "top_ranked_statistics": {
+            "num_top_ranked": 14064884,
+            "min_top_ranked_per_query": 121249,
+            "average_top_ranked_per_query": 121249.0,
+            "max_top_ranked_per_query": 121249
+        }
+    }
+}
mteb/descriptive_stats/Retrieval/BrightEconomicsLongRetrieval.json
ADDED

@@ -0,0 +1,35 @@
+{
+    "long": {
+        "num_samples": 619,
+        "number_of_characters": 19993261,
+        "documents_text_statistics": {
+            "total_text_length": 19917079,
+            "min_text_length": 43,
+            "average_text_length": 38598.99031007752,
+            "max_text_length": 429507,
+            "unique_texts": 515
+        },
+        "documents_image_statistics": null,
+        "queries_text_statistics": {
+            "total_text_length": 76182,
+            "min_text_length": 164,
+            "average_text_length": 739.6310679611651,
+            "max_text_length": 2223,
+            "unique_texts": 103
+        },
+        "queries_image_statistics": null,
+        "relevant_docs_statistics": {
+            "num_relevant_docs": 109,
+            "min_relevant_docs_per_query": 1,
+            "average_relevant_docs_per_query": 1.058252427184466,
+            "max_relevant_docs_per_query": 3,
+            "unique_relevant_docs": 109
+        },
+        "top_ranked_statistics": {
+            "num_top_ranked": 53148,
+            "min_top_ranked_per_query": 516,
+            "average_top_ranked_per_query": 516.0,
+            "max_top_ranked_per_query": 516
+        }
+    }
+}
mteb/descriptive_stats/Retrieval/BrightEconomicsRetrieval.json
ADDED

@@ -0,0 +1,35 @@
+{
+    "standard": {
+        "num_samples": 50323,
+        "number_of_characters": 19882579,
+        "documents_text_statistics": {
+            "total_text_length": 19806397,
+            "min_text_length": 1,
+            "average_text_length": 394.3926125049781,
+            "max_text_length": 39672,
+            "unique_texts": 40594
+        },
+        "documents_image_statistics": null,
+        "queries_text_statistics": {
+            "total_text_length": 76182,
+            "min_text_length": 164,
+            "average_text_length": 739.6310679611651,
+            "max_text_length": 2223,
+            "unique_texts": 103
+        },
+        "queries_image_statistics": null,
+        "relevant_docs_statistics": {
+            "num_relevant_docs": 823,
+            "min_relevant_docs_per_query": 1,
+            "average_relevant_docs_per_query": 7.990291262135922,
+            "max_relevant_docs_per_query": 85,
+            "unique_relevant_docs": 823
+        },
+        "top_ranked_statistics": {
+            "num_top_ranked": 5172660,
+            "min_top_ranked_per_query": 50220,
+            "average_top_ranked_per_query": 50220.0,
+            "max_top_ranked_per_query": 50220
+        }
+    }
+}