mteb-2.7.5-py3-none-any.whl → mteb-2.7.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mteb/abstasks/retrieval.py +2 -2
- mteb/abstasks/retrieval_dataset_loaders.py +8 -10
- mteb/benchmarks/_create_table.py +0 -4
- mteb/evaluate.py +3 -0
- mteb/leaderboard/app.py +4 -1
- mteb/models/get_model_meta.py +21 -2
- mteb/models/model_implementations/voyage_models.py +27 -0
- mteb/results/task_result.py +17 -7
- {mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/METADATA +1 -1
- {mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/RECORD +14 -14
- {mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/WHEEL +0 -0
- {mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/entry_points.txt +0 -0
- {mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/licenses/LICENSE +0 -0
- {mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/top_level.txt +0 -0
mteb/abstasks/retrieval.py
CHANGED
@@ -263,7 +263,7 @@ class AbsTaskRetrieval(AbsTask):
             return

         dataset_path = self.metadata.dataset["path"]
-        eval_splits = self.
+        eval_splits = self.eval_splits
         trust_remote_code = self.metadata.dataset.get("trust_remote_code", False)
         revision = self.metadata.dataset["revision"]

@@ -284,7 +284,7 @@ class AbsTaskRetrieval(AbsTask):
         )

         if self.metadata.is_multilingual:
-            for lang in self.
+            for lang in self.hf_subsets:
                 for split in eval_splits:
                     _process_data(split, lang)
         else:
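
For context, the task-level attributes the loader now reads differ from the raw metadata once a task has been filtered. A hedged sketch of that distinction, assuming the usual mteb pattern where `get_task` accepts split/subset filter arguments (the exact keyword names here are an assumption, not taken from this diff):

```python
import mteb

# Hypothetical usage: filtering at task construction narrows the task-level
# attributes, while task.metadata keeps the full dataset definition. Loading
# via self.eval_splits / self.hf_subsets therefore honors the filtering.
task = mteb.get_task("MIRACLRetrieval", eval_splits=["dev"], hf_subsets=["de"])
print(task.eval_splits)           # ["dev"] -- the filtered view
print(task.metadata.eval_splits)  # every split defined in the metadata
```
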
mteb/abstasks/retrieval_dataset_loaders.py
CHANGED
@@ -139,9 +139,9 @@ class RetrievalDatasetLoader:
         )

     def _load_corpus(self, num_proc: int) -> CorpusDatasetType:
-        logger.info("Loading Corpus...")
-
         config = f"{self.config}-corpus" if self.config is not None else "corpus"
+        logger.info("Loading corpus subset: %s", config)
+
         corpus_ds = self._load_dataset_split(config, num_proc)
         if "_id" in corpus_ds.column_names:
             corpus_ds = corpus_ds.cast_column("_id", Value("string")).rename_column(

@@ -152,9 +152,9 @@ class RetrievalDatasetLoader:
         return corpus_ds

     def _load_queries(self, num_proc: int) -> QueryDatasetType:
-        logger.info("Loading Queries...")
-
         config = f"{self.config}-queries" if self.config is not None else "queries"
+        logger.info("Loading queries subset: %s", config)
+
         if "query" in self.dataset_configs:
             config = "query"
         queries_ds = self._load_dataset_split(config, num_proc)

@@ -169,9 +169,9 @@ class RetrievalDatasetLoader:
         return queries_ds

     def _load_qrels(self, num_proc: int) -> RelevantDocumentsType:
-        logger.info("Loading qrels...")
-
         config = f"{self.config}-qrels" if self.config is not None else "default"
+
+        logger.info("Loading qrels subset: %s", config)
         if config == "default" and config not in self.dataset_configs:
             if "qrels" in self.dataset_configs:
                 config = "qrels"

@@ -204,11 +204,10 @@ class RetrievalDatasetLoader:
         return qrels_dict

     def _load_top_ranked(self, num_proc: int) -> TopRankedDocumentsType:
-        logger.info("Loading Top Ranked")
-
         config = (
             f"{self.config}-top_ranked" if self.config is not None else "top_ranked"
         )
+        logger.info("Loading top ranked subset: %s", config)
         top_ranked_ds = self._load_dataset_split(config, num_proc)
         top_ranked_ds = top_ranked_ds.cast(
             Features(

@@ -228,11 +227,10 @@ class RetrievalDatasetLoader:
         return top_ranked_dict

     def _load_instructions(self, num_proc: int) -> InstructionDatasetType:
-        logger.info("Loading Instructions")
-
         config = (
             f"{self.config}-instruction" if self.config is not None else "instruction"
         )
+        logger.info("Loading instruction subset: %s", config)
         instructions_ds = self._load_dataset_split(config, num_proc)
         instructions_ds = instructions_ds.cast(
             Features(
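
Two things change in each loader: the log line now reports the resolved subset name, and it is emitted after `config` is computed. The calls keep the stdlib's lazy %-style formatting, as in this minimal sketch:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

config = "de-corpus"
# %-style arguments defer string interpolation until the record is actually
# emitted, so no string is built when INFO logging is disabled.
logger.info("Loading corpus subset: %s", config)
```
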
mteb/benchmarks/_create_table.py
CHANGED
@@ -119,7 +119,6 @@ def _create_summary_table_from_benchmark_results(

     # Build joint table
     joint_table = mean_per_type.copy()
-    joint_table = joint_table.drop(models_to_remove, axis=0)
     joint_table.insert(0, "mean", overall_mean)
     joint_table.insert(1, "mean_by_task_type", typed_mean)
     joint_table["borda_rank"] = _get_borda_rank(per_task)

@@ -359,7 +358,6 @@ def _create_summary_table_mean_public_private(

     # Build joint table
     joint_table = mean_per_type.copy()
-    joint_table = joint_table.drop(models_to_remove, axis=0)
     joint_table.insert(0, "mean(public)", public_mean)
     joint_table.insert(1, "mean(private)", private_mean)
     if exclude_private_from_borda:

@@ -486,7 +484,6 @@ def _create_summary_table_mean_subset(

     # Build joint table
     joint_table = mean_per_type.copy()
-    joint_table = joint_table.drop(models_to_remove, axis=0)
     joint_table.insert(0, "mean(subset)", overall_subset_mean)
     joint_table["borda_rank"] = _get_borda_rank(per_subset)
     joint_table = joint_table.sort_values("mean(subset)", ascending=False)

@@ -605,7 +602,6 @@ def _create_summary_table_mean_task_type(

     # Build joint table
     joint_table = mean_per_type.copy()
-    joint_table = joint_table.drop(models_to_remove, axis=0)
     joint_table.insert(0, "mean_by_task_type", typed_mean)
     joint_table = joint_table.sort_values("mean_by_task_type", ascending=False)
     joint_table["borda_rank"] = _get_borda_rank(per_task)
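
All four summary builders delete the same row-dropping call. For reference, a minimal pandas illustration of what the removed line did (the model names are illustrative, not real leaderboard data):

```python
import pandas as pd

mean_per_type = pd.DataFrame(
    {"Retrieval": [0.71, 0.64]}, index=["model-a", "model-b"]
)
models_to_remove = ["model-b"]

joint_table = mean_per_type.copy()
# Drops rows by index label; note this raises a KeyError for labels that are
# absent from the index unless errors="ignore" is passed.
joint_table = joint_table.drop(models_to_remove, axis=0)
print(joint_table.index.tolist())  # ['model-a']
```
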
mteb/evaluate.py
CHANGED
@@ -363,6 +363,9 @@ def evaluate(
         num_proc=num_proc,
     )
     combined_results = aggregated_task.combine_task_results(results.task_results)
+    if cache:
+        cache.save_to_cache(combined_results, meta)
+
     return ModelResult(
         model_name=results.model_name,
         model_revision=results.model_revision,
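
The new lines persist the combined result of an aggregated task, guarded so that passing no cache disables writes. A minimal sketch of the pattern, with a hypothetical protocol that names only the method visible in the diff:

```python
from typing import Any, Protocol

class ResultsCacheLike(Protocol):
    # Hypothetical protocol; only save_to_cache is assumed from the diff.
    def save_to_cache(self, results: Any, meta: Any) -> None: ...

def persist_combined(cache: ResultsCacheLike | None, combined: Any, meta: Any) -> None:
    if cache:  # a None (or otherwise falsy) cache skips persistence entirely
        cache.save_to_cache(combined, meta)
```
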
mteb/leaderboard/app.py
CHANGED
@@ -550,7 +550,10 @@ def get_leaderboard_app(cache: ResultCache = ResultCache()) -> gr.Blocks:

     logger.info("Step 7/7: Building Gradio interface and callbacks...")
     interface_start = time.time()
-    with gr.Blocks(
+    with gr.Blocks(
+        title="MTEB Leaderboard",
+        fill_width=True,
+    ) as demo:
         with gr.Sidebar(
             position="left",
             label="Benchmark Selection and Customization",
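
A standalone sketch of the new `gr.Blocks` configuration; both keyword arguments are regular Gradio `Blocks` parameters:

```python
import gradio as gr

# `title` sets the browser tab title; `fill_width=True` stretches the layout
# across the full viewport instead of a centered column.
with gr.Blocks(title="MTEB Leaderboard", fill_width=True) as demo:
    gr.Markdown("Leaderboard content goes here.")

if __name__ == "__main__":
    demo.launch()
```
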
mteb/models/get_model_meta.py
CHANGED
@@ -123,7 +123,10 @@ def get_model(


 def get_model_meta(
-    model_name: str,
+    model_name: str,
+    revision: str | None = None,
+    fetch_from_hf: bool = True,
+    fill_missing: bool = False,
 ) -> ModelMeta:
     """A function to fetch a model metadata object by name.

@@ -131,6 +134,7 @@ def get_model_meta(
         model_name: Name of the model to fetch
         revision: Revision of the model to fetch
         fetch_from_hf: Whether to fetch the model from HuggingFace Hub if not found in the registry
+        fill_missing: Computes missing attributes from the metadata including number of parameters and memory usage.

     Returns:
         A model metadata object

@@ -142,10 +146,25 @@ def get_model_meta(
             raise ValueError(
                 f"Model revision {revision} not found for model {model_name}. Expected {model_meta.revision}."
             )
+
+        if fill_missing and fetch_from_hf:
+            original_meta_dict = model_meta.model_dump()
+            new_meta = ModelMeta.from_hub(model_name)
+            new_meta_dict = new_meta.model_dump(exclude_none=True)
+
+            updates = {
+                k: v
+                for k, v in new_meta_dict.items()
+                if original_meta_dict.get(k) is None
+            }
+
+            if updates:
+                return model_meta.model_copy(update=updates)
         return model_meta
+
     if fetch_from_hf:
         logger.info(
-            "Model not found in model registry. Attempting to extract metadata by loading the model ({model_name}) using HuggingFace."
+            f"Model not found in model registry. Attempting to extract metadata by loading the model ({model_name}) using HuggingFace."
         )
         meta = ModelMeta.from_hub(model_name, revision)
         return meta
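
The `fill_missing` branch is the standard Pydantic v2 merge idiom: dump both models, take hub values only for fields the registry left as `None`, and build an updated copy. A self-contained sketch with a toy model (the `Meta` class below is illustrative, not mteb's `ModelMeta`):

```python
from pydantic import BaseModel

class Meta(BaseModel):
    name: str
    n_parameters: int | None = None
    memory_usage_mb: float | None = None

registry = Meta(name="org/model")  # registry entry with gaps
hub = Meta(name="org/model", n_parameters=7_000_000_000)

# Take hub values only for fields that are None in the registry entry.
updates = {
    k: v
    for k, v in hub.model_dump(exclude_none=True).items()
    if registry.model_dump().get(k) is None
}
merged = registry.model_copy(update=updates) if updates else registry
print(merged.n_parameters)  # 7000000000
```
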
mteb/models/model_implementations/voyage_models.py
CHANGED
@@ -182,6 +182,7 @@ class VoyageModel(AbsEncoder):
                         model=self._model_name,
                         input_type=input_type,
                         output_dtype=output_dtype,
+                        output_dimension=self.mteb_model_meta.embed_dim,
                     ).embeddings
                 )
                 pbar.update(len(batch))

@@ -215,6 +216,32 @@ model_prompts = {
     PromptType.document.value: "document",
 }

+voyage_4_large_2048d = ModelMeta(
+    name="voyageai/voyage-4-large (embed_dim=2048)",
+    model_type=["dense"],
+    revision="1",
+    release_date="2026-01-15",
+    languages=None,  # supported languages not specified
+    loader=VoyageModel,
+    loader_kwargs=dict(
+        max_tokens=32000,
+        model_prompts=model_prompts,
+    ),
+    max_tokens=32000,
+    embed_dim=2048,
+    open_weights=False,
+    n_parameters=None,
+    memory_usage_mb=None,
+    license=None,
+    reference="https://blog.voyageai.com/2026/01/15/voyage-4/",
+    similarity_fn_name="cosine",
+    framework=["API"],
+    use_instructions=True,
+    training_datasets=VOYAGE_TRAINING_DATA,
+    public_training_code=None,
+    public_training_data=None,
+)
+
 voyage_4 = ModelMeta(
     name="voyageai/voyage-4",
     model_type=["dense"],
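
The new entry registers a 2048-dimension variant of voyage-4-large under a parenthesized name, and the `output_dimension` argument passes that `embed_dim` through to the Voyage embeddings API. A hedged usage sketch, assuming the entry ships in 2.7.7 and the registry key matches the `name` field above:

```python
import mteb

# The parenthesized suffix is the registry key for the truncated-dimension
# variant added in this release.
meta = mteb.get_model_meta("voyageai/voyage-4-large (embed_dim=2048)")
print(meta.embed_dim)   # 2048
print(meta.max_tokens)  # 32000
```
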
mteb/results/task_result.py
CHANGED
@@ -647,16 +647,26 @@ class TaskResult(BaseModel):
             if split not in splits:
                 continue
             seen_subsets = set()
-
-
-
-
-
-
+            if task.is_aggregate:
+                # aggregate tasks only have the default subset, but in metadata can be multiple
+                new_scores[split] = [
+                    _scores
+                    for _scores in self.scores[split]
+                    if _scores["hf_subset"] == "default"
+                ]
+                seen_subsets = {"default"}
+            else:
+                new_scores[split] = [
+                    _scores
+                    for _scores in self.scores[split]
+                    if _scores["hf_subset"] in hf_subsets
+                ]
             for _scores in new_scores[split]:
                 seen_subsets.add(_scores["hf_subset"])

-            if seen_subsets != hf_subsets
+            if seen_subsets != hf_subsets and not (
+                task.is_aggregate and "default" in seen_subsets
+            ):
                 missing_subsets = hf_subsets - seen_subsets
                 if len(missing_subsets) > 2:
                     subset1, subset2 = list(missing_subsets)[:2]
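
The rule the new branch encodes, extracted into a standalone sketch: aggregate tasks store their scores under a single "default" subset even when their metadata lists several subsets, so "default" alone counts as full coverage. Variable names mirror the diff, but the snippet itself is illustrative:

```python
scores = [{"hf_subset": "default", "main_score": 0.5}]
hf_subsets = {"en", "de"}  # subsets listed in the task metadata
is_aggregate = True

if is_aggregate:
    kept = [s for s in scores if s["hf_subset"] == "default"]
    seen_subsets = {"default"}
else:
    kept = [s for s in scores if s["hf_subset"] in hf_subsets]
    seen_subsets = {s["hf_subset"] for s in kept}

# An aggregate task with "default" present is not reported as incomplete.
incomplete = seen_subsets != hf_subsets and not (
    is_aggregate and "default" in seen_subsets
)
print(incomplete)  # False
```
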
{mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mteb
-Version: 2.7.5
+Version: 2.7.7
 Summary: Massive Text Embedding Benchmark
 Author-email: MTEB Contributors <niklas@huggingface.co>, Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Nouamane Tazi <nouamane@huggingface.co>, Nils Reimers <info@nils-reimers.de>
 Maintainer-email: Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Roman Solomatin <risolomatin@gmail.com>, Isaac Chung <chungisaac1217@gmail.com>
{mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/RECORD
CHANGED
@@ -7,7 +7,7 @@ mteb/_requires_package.py,sha256=eHg_TD9BVZRzNCcQQrUP17d8M1DF_vOd_tVx54AmAnM,301
 mteb/_set_seed.py,sha256=HPlPRl__Pe6IG-4UgJqTfplcivJ_wA2kaClbXoHQedM,1178
 mteb/cache.py,sha256=xE2BhrG_ypm2C0nPt6tRQCe393pN1RjDLadQcC6W1WE,27657
 mteb/deprecated_evaluator.py,sha256=F47pBHL3lM8YHGZEPLR4CuVn7aV3gzomNpyP3I3jmmc,27727
-mteb/evaluate.py,sha256=
+mteb/evaluate.py,sha256=XFNaxut-prQDwYw_1F4Oco9B_pumUOHI6Ke2PBLk22U,19718
 mteb/filter_tasks.py,sha256=ZFFEl7jWTSq3kDdM8oHG-RZrrBOP16zcFzezA3OsLiM,7430
 mteb/get_tasks.py,sha256=1BZJF7jADJnnL_ZzsJaTKLITO7u6inAHV0PxCAQfhfc,14402
 mteb/load_results.py,sha256=geGyArDB1CNrqse_QzPqfvY7YvF9fL57LJsuy-DDY58,6532

@@ -42,8 +42,8 @@ mteb/abstasks/dataset_card_template.md,sha256=aD6l8qc3_jxwoIGJNYLzse-jpRa8hu92Ax
 mteb/abstasks/multilabel_classification.py,sha256=rFa_Pw2OsUzqhZS-jh2zFD7I-TNl8bVNJ-DW7EpPapU,9708
 mteb/abstasks/pair_classification.py,sha256=MpjOeIcM_RMqSpkXc8PFDAwDgmb54gEqj6ZOUcOa1aQ,14122
 mteb/abstasks/regression.py,sha256=ZuMZfOwU3G4hr__eHsgdagKKdrbN4-wQMLz45jr9YUc,8946
-mteb/abstasks/retrieval.py,sha256=
-mteb/abstasks/retrieval_dataset_loaders.py,sha256=
+mteb/abstasks/retrieval.py,sha256=BPyRibStAD70JfR0Z1x-VVVfzJDRVSmbOS6uREfpmok,27743
+mteb/abstasks/retrieval_dataset_loaders.py,sha256=p0y1nrWlUrt_aeoR4ocDLEQMLuD_SlMH0gBiUsOwrww,9983
 mteb/abstasks/sts.py,sha256=Xta3KVQE7hHqkPTDptemvNVEG0CsZSVjA-Z52EIBvDE,9576
 mteb/abstasks/task_metadata.py,sha256=_yVgX3yjaxVvK-Cs2nw2wMEkI0xf-QhcGb_byCeyo-w,27210
 mteb/abstasks/zeroshot_classification.py,sha256=UrUErSH4owJujRn-HQI6KUe4By12oRjZeOnvB6lBnNA,6360

@@ -57,7 +57,7 @@ mteb/abstasks/text/bitext_mining.py,sha256=an9ZlcraS9ZN5tCT8ZQFtMbCrPE4-n7anAAbn
 mteb/abstasks/text/reranking.py,sha256=mCzy0-TnZ46_GC9Czl4zWKAPnYK5ur0qtFbPt47m9jQ,7917
 mteb/abstasks/text/summarization.py,sha256=bSgb0XhUzJVuLV1Wjr3HYB_Tn7SjmCDMnkBIEWHO4EQ,7381
 mteb/benchmarks/__init__.py,sha256=MQEVeli-zLaJ7Xg0z7RhXQwsdmm7Ht_W2Ln0rZo1Szc,225
-mteb/benchmarks/_create_table.py,sha256=
+mteb/benchmarks/_create_table.py,sha256=CJL8U0adUbaxr5G26trfYo1tGx8cU2IWWx_ZHU6q6do,22407
 mteb/benchmarks/benchmark.py,sha256=YCGIvJ5Vc6GdCAYSjzwrnfj2A8MkbzNLvvtPBLMSSp8,6327
 mteb/benchmarks/get_benchmark.py,sha256=nzR6cu5yXu1kIJKhd4A2R62xp43Z62bluPbOpNXHMWQ,2545
 mteb/benchmarks/benchmarks/__init__.py,sha256=-o3EMWEfP0eQ8iZpWvTj5r4yuGOUuL9mHk8IgFcpPtk,2330

@@ -1472,14 +1472,14 @@ mteb/languages/language_family.json,sha256=OUGcHeOIPcZPb2FWmYLhxTS0JxjK5y3Fo6x0P
 mteb/languages/language_scripts.py,sha256=AjtdlJlVRE3TWASdmREKw0P-xAoVFdl_Rut5DgX6k6c,4149
 mteb/languages/programming_languages.py,sha256=zxAakT3OSUnAuTnQ34VyeFIECnNXMlleZmAake6jsZE,211
 mteb/leaderboard/__init__.py,sha256=991roXmtRwEQysV-37hWEzWpkvPgMCGRqZTHR-hm2io,88
-mteb/leaderboard/app.py,sha256=
+mteb/leaderboard/app.py,sha256=Y3G93VJq6eZMD4_CNzLwSEEGnuNJDqYEYztmzYR85eA,42549
 mteb/leaderboard/benchmark_selector.py,sha256=qd-2L20RQ4ACke01UlytkhZok1dkWgfUlXzfET52kGc,7956
 mteb/leaderboard/figures.py,sha256=cfOK82rRf-7sCjyP7GBxh4ezhOIt0OhD0_86mKtzLrg,7530
 mteb/leaderboard/table.py,sha256=U5mWtrVUTk_6t8T4KAp5qlbFgKh1PD0iKICqNMfhsoY,10462
 mteb/leaderboard/text_segments.py,sha256=iMIkS04QQjPbT-SkU0x6fOcS8xRbUYevryu9HydipKM,6570
 mteb/models/__init__.py,sha256=ABTuoqiBjBtBWW3LYY7ItBHdylR6jWoy06HH0g6j6fU,910
 mteb/models/abs_encoder.py,sha256=We9HlwWP61P4cMyZ080gywvDErA1eVsU9t46PtcNrCM,16830
-mteb/models/get_model_meta.py,sha256=
+mteb/models/get_model_meta.py,sha256=h6Z2Q3D_L4BeJbi1gPHTSgelbQjsQn1ALpI8uwNa0ac,6832
 mteb/models/instruct_wrapper.py,sha256=XAvvbPnXiTxKhFbmusm2uS8E9BMq8QXRSzQQI1jqKzE,9781
 mteb/models/model_meta.py,sha256=5seQwce893SbgSywFsphLuZGQ9rRn1fLl9fj1SfKnXE,32887
 mteb/models/models_protocols.py,sha256=HTB4-SYa3SeJXMMSA8o05lHTiLBbq314VW60K_PfcZY,9509

@@ -1615,7 +1615,7 @@ mteb/models/model_implementations/vdr_models.py,sha256=IGvpE2F42IWBN5QwKSWjsAehT
 mteb/models/model_implementations/vi_vn_models.py,sha256=7hot8CF5B1UeC4WJXnAAs1C1vbqK2lq7Bw338ztKFDE,6566
 mteb/models/model_implementations/vista_models.py,sha256=mcI0La6__LasuLd5P-nkc4Z-r9X_8sYhGFLdVPGPmkw,11033
 mteb/models/model_implementations/vlm2vec_models.py,sha256=1iq2i1ZbsPINE8nXoVZsX1Km-4dTTAd6St6J38I8Tew,11951
-mteb/models/model_implementations/voyage_models.py,sha256=
+mteb/models/model_implementations/voyage_models.py,sha256=g7WET4MibXN6eABrmhn8uTGXdjwf5Kk4ddqQmHA7v6A,23920
 mteb/models/model_implementations/voyage_v.py,sha256=_mJGhskJj9zeHYebEJFuYheLPb-YDyiu6Hny_5LQcAE,8280
 mteb/models/model_implementations/xyz_models.py,sha256=69JyOCQHVq19nAG3zQFi-UYYT6I7uHmvTcmRxHvjyc8,1361
 mteb/models/model_implementations/youtu_models.py,sha256=P5fh34UJZQObJAbz3Wuzqh9Nw5S7LraqxdtwAX3sDJ8,6028

@@ -1628,7 +1628,7 @@ mteb/models/search_encoder_index/search_indexes/faiss_search_index.py,sha256=jwC
 mteb/results/__init__.py,sha256=EXQqK4Am5eIYzD52dpcGAFSdqnC38oE6JHN302oidHc,158
 mteb/results/benchmark_results.py,sha256=unBUBJ92ud0UXlkZJLn71WVcf-oUlF6XcITTccz5OBA,20318
 mteb/results/model_result.py,sha256=u1eUynaf_KVsdYdz29MACsCDR_48xODapkIGLG-lZa0,15321
-mteb/results/task_result.py,sha256=
+mteb/results/task_result.py,sha256=TL9V7P7cXqLljnCHdzzaQ9vZZSXh9qXYFnPRG049alw,34247
 mteb/tasks/__init__.py,sha256=izAxU0ip1F_YUwx0dFCuN35BaktdmePh6vlDiHC0kLo,503
 mteb/tasks/aggregated_tasks/__init__.py,sha256=Ufgbh1AirxCQkojO3AUhUFWM8zQG10cfdVTkj_PeyLI,104
 mteb/tasks/aggregated_tasks/eng/__init__.py,sha256=HgaSyAX8Is5CGE006RgJkLQQVxrx2FmMnm6NHQBDi-4,358

@@ -2644,9 +2644,9 @@ mteb/types/_metadata.py,sha256=NN-W0S6a5TDV7UkpRx1pyWtGF4TyyCyoPUfHOwdeci8,2290
 mteb/types/_result.py,sha256=UKNokV9pu3G74MGebocU512aU_fFU9I9nPKnrG9Q0iE,1035
 mteb/types/_string_validators.py,sha256=PY-dYq4E8O50VS3bLYdldPWp400fl_WzUjfVSkNWe8U,523
 mteb/types/statistics.py,sha256=gElgSShKBXpfcqaZHhU_d2UHln1CyzUj8FN8KFun_UA,4087
-mteb-2.7.5.dist-info/licenses/LICENSE,sha256=
-mteb-2.7.5.dist-info/METADATA,sha256=
-mteb-2.7.5.dist-info/WHEEL,sha256=
-mteb-2.7.5.dist-info/entry_points.txt,sha256=
-mteb-2.7.5.dist-info/top_level.txt,sha256=
-mteb-2.7.5.dist-info/RECORD,,
+mteb-2.7.7.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mteb-2.7.7.dist-info/METADATA,sha256=4rUFnkpk-5uGKDWjJwqBBQNLDaWwDMo0aZ7SavBbNL8,14457
+mteb-2.7.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mteb-2.7.7.dist-info/entry_points.txt,sha256=8IJoEJFKoDHmVnNev-qJ9pp4Ln7_1-ma9QsXnzVCzGU,39
+mteb-2.7.7.dist-info/top_level.txt,sha256=OLVIjcQAlWBz0bdmutKlWHLF42FF0hp4uVAg3ZyiG4U,5
+mteb-2.7.7.dist-info/RECORD,,
{mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/WHEEL
File without changes

{mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/entry_points.txt
File without changes

{mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/licenses/LICENSE
File without changes

{mteb-2.7.5.dist-info → mteb-2.7.7.dist-info}/top_level.txt
File without changes