mteb-2.6.8-py3-none-any.whl → mteb-2.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -148,7 +148,7 @@ llama_nemoretriever_colembed_1b_v1 = ModelMeta(
     name="nvidia/llama-nemoretriever-colembed-1b-v1",
     model_type=["late-interaction"],
     languages=["eng-Latn"],
-    revision="1f0fdea7f5b19532a750be109b19072d719b8177",
+    revision="6eade800103413033f260bb55b49fe039fd28a6e",
     release_date="2025-06-27",
     modalities=["image", "text"],
     n_parameters=2_418_000_000,
@@ -175,7 +175,7 @@ llama_nemoretriever_colembed_3b_v1 = ModelMeta(
     name="nvidia/llama-nemoretriever-colembed-3b-v1",
     model_type=["late-interaction"],
     languages=["eng-Latn"],
-    revision="50c36f4d5271c6851aa08bd26d69f6e7ca8b870c",
+    revision="4194bdd2cd2871f220ddba6273ce173ef1217a1e",
     release_date="2025-06-27",
     modalities=["image", "text"],
     n_parameters=4_407_000_000,
@@ -19,14 +19,24 @@ from mteb.types import Array, BatchedInput, PromptType
 
 logger = logging.getLogger(__name__)
 
-NV_RETRIEVER_CITATION = """@misc{moreira2025nvretrieverimprovingtextembedding,
-      title={NV-Retriever: Improving text embedding models with effective hard-negative mining},
-      author={Gabriel de Souza P. Moreira and Radek Osmulski and Mengyao Xu and Ronay Ak and Benedikt Schifferer and Even Oldridge},
+NV_RETRIEVER_CITATION = """@misc{lee2025nvembedimprovedtechniquestraining,
+      title={NV-Embed: Improved Techniques for Training LLMs as Generalist Embedding Models},
+      author={Chankyu Lee and Rajarshi Roy and Mengyao Xu and Jonathan Raiman and Mohammad Shoeybi and Bryan Catanzaro and Wei Ping},
       year={2025},
-      eprint={2407.15831},
+      eprint={2405.17428},
       archivePrefix={arXiv},
-      primaryClass={cs.IR},
-      url={https://arxiv.org/abs/2407.15831}
+      primaryClass={cs.CL},
+      url={https://arxiv.org/abs/2405.17428},
+}"""
+
+LlamaEmbedNemotron_CITATION = """@misc{babakhin2025llamaembednemotron8buniversaltextembedding,
+      title={Llama-Embed-Nemotron-8B: A Universal Text Embedding Model for Multilingual and Cross-Lingual Tasks},
+      author={Yauhen Babakhin and Radek Osmulski and Ronay Ak and Gabriel Moreira and Mengyao Xu and Benedikt Schifferer and Bo Liu and Even Oldridge},
+      year={2025},
+      eprint={2511.07025},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL},
+      url={https://arxiv.org/abs/2511.07025},
 }"""
 
 
@@ -614,8 +624,8 @@ llama_embed_nemotron_8b = ModelMeta(
     framework=["PyTorch", "Sentence Transformers", "safetensors", "Transformers"],
     use_instructions=True,
     training_datasets=llama_embed_nemotron_training_datasets,
-    public_training_code=None,  # Will be released later
-    public_training_data=None,  # Will be released later
+    public_training_code="https://github.com/NVIDIA-NeMo/Automodel/tree/main/examples/biencoder/llama_embed_nemotron_8b",
+    public_training_data="https://huggingface.co/datasets/nvidia/embed-nemotron-dataset-v1",
     contacts=["ybabakhin"],
-    citation=NV_RETRIEVER_CITATION,
+    citation=LlamaEmbedNemotron_CITATION,
 )
@@ -206,6 +206,58 @@ model_prompts = {
     PromptType.document.value: "document",
 }
 
+voyage_4_lite = ModelMeta(
+    name="voyageai/voyage-4-lite",
+    model_type=["dense"],
+    revision="1",
+    release_date="2026-01-15",
+    languages=None,  # supported languages not specified
+    loader=VoyageModel,
+    loader_kwargs=dict(
+        max_tokens=32000,
+        model_prompts=model_prompts,
+    ),
+    max_tokens=32000,
+    embed_dim=1024,
+    open_weights=False,
+    n_parameters=None,
+    memory_usage_mb=None,
+    license=None,
+    reference="https://blog.voyageai.com/2026/01/15/voyage-4/",
+    similarity_fn_name="cosine",
+    framework=["API"],
+    use_instructions=True,
+    training_datasets=VOYAGE_TRAINING_DATA,
+    public_training_code=None,
+    public_training_data=None,
+)
+
+voyage_4_large = ModelMeta(
+    name="voyageai/voyage-4-large",
+    model_type=["dense"],
+    revision="1",
+    release_date="2026-01-15",
+    languages=None,  # supported languages not specified
+    loader=VoyageModel,
+    loader_kwargs=dict(
+        max_tokens=32000,
+        model_prompts=model_prompts,
+    ),
+    max_tokens=32000,
+    embed_dim=1024,
+    open_weights=False,
+    n_parameters=None,
+    memory_usage_mb=None,
+    license=None,
+    reference="https://blog.voyageai.com/2026/01/15/voyage-4/",
+    similarity_fn_name="cosine",
+    framework=["API"],
+    use_instructions=True,
+    training_datasets=VOYAGE_TRAINING_DATA,
+    public_training_code=None,
+    public_training_data=None,
+)
+
 voyage_3_large = ModelMeta(
     name="voyageai/voyage-3-large",  # Date of publication of this post https://blog.voyageai.com/2025/01/07/voyage-3-large/
     model_type=["dense"],
@@ -230,6 +282,7 @@ voyage_3_large = ModelMeta(
     training_datasets=VOYAGE_TRAINING_DATA,
     public_training_code=None,
     public_training_data=None,
+    superseded_by="voyageai/voyage-4-large",
 )
 
 
@@ -571,6 +624,7 @@ voyage_3_lite = ModelMeta(
     training_datasets=VOYAGE_TRAINING_DATA,
     public_training_code=None,
     public_training_data=None,
+    superseded_by="voyageai/voyage-4-lite",
 )
 
 voyage_3_exp = ModelMeta(
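
The two new voyage-4 entries above are API-served models that reuse the existing VoyageModel loader, and the voyage-3 entries are now marked as superseded by them. A minimal smoke-test sketch, assuming the mteb 2.x get_model/get_tasks/evaluate entry points and that a Voyage AI API key is exported in the environment (the variable name below is an assumption, not taken from this diff):

import mteb

# Assumes a Voyage AI key is set, e.g. VOYAGE_API_KEY=... (variable name is an assumption).
model = mteb.get_model("voyageai/voyage-4-lite")  # resolves the ModelMeta entry added in this release
tasks = mteb.get_tasks(tasks=["STS12"])           # any small text task works as a smoke test
results = mteb.evaluate(model, tasks=tasks)       # evaluation entry point assumed from the mteb 2.x API
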
@@ -0,0 +1,327 @@
+from __future__ import annotations
+
+import atexit
+import gc
+import logging
+import os
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any, Literal
+
+import numpy as np
+import torch
+from torch.utils.data import DataLoader
+
+from mteb._requires_package import requires_package
+from mteb.abstasks.task_metadata import TaskMetadata
+from mteb.models import ModelMeta
+from mteb.models.abs_encoder import AbsEncoder
+from mteb.types import Array, BatchedInput, PromptType
+
+if TYPE_CHECKING:
+    from vllm.config import PoolerConfig  # type: ignore[import-not-found]
+else:
+    PoolerConfig = dict[str, Any]
+
+logger = logging.getLogger(__name__)
+
+Dtype = Literal["half", "float16", "float", "float32", "bfloat16", "auto"]
+
+
+class VllmWrapperBase:
+    """Wrapper for vllm serving engine."""
+
+    convert = "auto"
+    mteb_model_meta: ModelMeta | None = None
+
+    def __init__(
+        self,
+        model: str | ModelMeta,
+        revision: str | None = None,
+        *,
+        trust_remote_code: bool = True,
+        dtype: Dtype = "auto",
+        head_dtype: Literal["model"] | Dtype | None = None,
+        max_model_len: int | None = None,
+        max_num_batched_tokens: int | None = None,
+        max_num_seqs: int = 128,
+        tensor_parallel_size: int = 1,
+        enable_prefix_caching: bool | None = None,
+        gpu_memory_utilization: float = 0.9,
+        hf_overrides: dict[str, Any] | None = None,
+        pooler_config: PoolerConfig | None = None,
+        enforce_eager: bool = False,
+        **kwargs: Any,
+    ):
+        """Wrapper for vllm serving engine.
+
+        Args:
+            model: model name string.
+            revision: The revision of the model to use.
+            trust_remote_code: Whether to trust remote code execution when loading the model.
+                Should be True for models with custom code.
+            dtype: Data type for model weights. "auto" will automatically select appropriate
+                dtype based on hardware and model capabilities. vllm uses flash attention by
+                default, which does not support fp32. Therefore, it defaults to using fp16 for
+                inference on fp32 models. Testing has shown a relatively small drop in accuracy.
+                You can manually opt for fp32, but inference speed will be very slow.
+            head_dtype: "head" refers to the last Linear layer(s) of an LLM, such as the score
+                or classifier in a classification model. Uses fp32 for the head by default to
+                gain extra precision.
+            max_model_len: Maximum sequence length (context window) supported by the model.
+                If None, uses the model's default maximum length.
+            max_num_batched_tokens: Maximum number of tokens to process in a single batch.
+                If None, automatically determined.
+            max_num_seqs: Maximum number of sequences to process concurrently.
+            tensor_parallel_size: Number of GPUs for tensor parallelism.
+            enable_prefix_caching: Whether to enable KV cache sharing for common prompt prefixes.
+                If None, uses the model's default setting.
+            gpu_memory_utilization: Target GPU memory utilization ratio (0.0 to 1.0).
+            hf_overrides: Dictionary mapping Hugging Face configuration keys to override values.
+            pooler_config: Controls the behavior of output pooling in pooling models.
+            enforce_eager: Whether to disable CUDA graph optimization and use eager execution.
+            **kwargs: Additional arguments to pass to the vllm serving engine model.
+        """
+        requires_package(
+            self,
+            "vllm",
+            "Wrapper for vllm serving engine",
+            install_instruction="pip install mteb[vllm]",
+        )
+
+        os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
+
+        from vllm import LLM, EngineArgs
+
+        hf_overrides = {} if hf_overrides is None else hf_overrides
+
+        if head_dtype is not None:
+            hf_overrides["head_dtype"] = head_dtype
+
+        model_name = model if isinstance(model, str) else model.name
+
+        if isinstance(model, ModelMeta):
+            logger.info(
+                "Using revision from model meta. Passed revision will be ignored"
+            )
+            revision = model.revision
+
+        args = EngineArgs(
+            model=model_name,
+            revision=revision,
+            runner="pooling",
+            convert=self.convert,  # type: ignore[arg-type]
+            max_model_len=max_model_len,
+            max_num_batched_tokens=max_num_batched_tokens,
+            max_num_seqs=max_num_seqs,
+            tensor_parallel_size=tensor_parallel_size,
+            enable_prefix_caching=enable_prefix_caching,
+            gpu_memory_utilization=gpu_memory_utilization,
+            hf_overrides=hf_overrides,
+            pooler_config=pooler_config,
+            enforce_eager=enforce_eager,
+            trust_remote_code=trust_remote_code,
+            dtype=dtype,
+            **kwargs,
+        )
+        self.llm = LLM(**vars(args))
+
+        if isinstance(model, str):
+            self.mteb_model_meta = ModelMeta.from_hub(model=model, revision=revision)
+        else:
+            self.mteb_model_meta = model
+
+        atexit.register(self.cleanup)
+
+    def cleanup(self):
+        """Clean up the VLLM distributed runtime environment and release GPU resources."""
+        if self.llm is None:
+            return
+
+        from vllm.distributed import (  # type: ignore[import-not-found]
+            cleanup_dist_env_and_memory,
+        )
+
+        self.llm = None
+        gc.collect()
+        cleanup_dist_env_and_memory()
+
+    def __del__(self):
+        try:
+            self.cleanup()
+        except Exception:
+            pass
+
+
+class VllmEncoderWrapper(AbsEncoder, VllmWrapperBase):
+    """vLLM wrapper for Encoder models.
+
+    Args:
+        model: model name string or ModelMeta.
+        revision: The revision of the model to use.
+        prompt_dict: A dictionary mapping task names to prompt strings.
+        use_instructions: Whether to use instructions from the prompt_dict.
+            When False, values from prompt_dict are used as static prompts (prefixes).
+            When True, values from prompt_dict are used as instructions to be formatted
+            using the instruction_template.
+        instruction_template: A template or callable to format instructions.
+            Can be a string with '{instruction}' placeholder or a callable that takes
+            the instruction and prompt type and returns a formatted string.
+        apply_instruction_to_documents: Whether to apply instructions to document prompts.
+        **kwargs: Additional arguments to pass to the vllm serving engine model.
+    """
+
+    convert = "embed"
+
+    def __init__(
+        self,
+        model: str | ModelMeta,
+        revision: str | None = None,
+        prompt_dict: dict[str, str] | None = None,
+        use_instructions: bool = False,
+        instruction_template: (
+            str | Callable[[str, PromptType | None], str] | None
+        ) = None,
+        apply_instruction_to_documents: bool = True,
+        **kwargs: Any,
+    ):
+        if use_instructions and instruction_template is None:
+            raise ValueError(
+                "To use instructions, an instruction_template must be provided. "
+                "For example, `Instruction: {instruction}`"
+            )
+
+        if (
+            isinstance(instruction_template, str)
+            and "{instruction}" not in instruction_template
+        ):
+            raise ValueError(
+                "Instruction template must contain the string '{instruction}'."
+            )
+
+        self.prompts_dict = prompt_dict
+        self.use_instructions = use_instructions
+        self.instruction_template = instruction_template
+        self.apply_instruction_to_passages = apply_instruction_to_documents
+        super().__init__(
+            model,
+            revision,
+            **kwargs,
+        )
+
+    def encode(
+        self,
+        inputs: DataLoader[BatchedInput],
+        *,
+        task_metadata: TaskMetadata,
+        hf_split: str,
+        hf_subset: str,
+        prompt_type: PromptType | None = None,
+        **kwargs: Any,
+    ) -> Array:
+        """Encodes the given sentences using the encoder.
+
+        Args:
+            inputs: The sentences to encode.
+            task_metadata: The metadata of the task. Sentence-transformers uses this to
+                determine which prompt to use from a specified dictionary.
+            prompt_type: The type of prompt (query or passage).
+            hf_split: Split of current task.
+            hf_subset: Subset of current task.
+            **kwargs: Additional arguments to pass to the encoder.
+
+        Returns:
+            The encoded sentences.
+        """
+        prompt = ""
+        if self.use_instructions and self.prompts_dict is not None:
+            prompt = self.get_task_instruction(task_metadata, prompt_type)
+        elif self.prompts_dict is not None:
+            prompt_name = self.get_prompt_name(task_metadata, prompt_type)
+            if prompt_name is not None:
+                prompt = self.prompts_dict.get(prompt_name, "")
+
+        if (
+            self.use_instructions
+            and self.apply_instruction_to_passages is False
+            and prompt_type == PromptType.document
+        ):
+            logger.info(
+                f"No instruction used, because prompt type = {prompt_type.document}"
+            )
+            prompt = ""
+        else:
+            logger.info(
+                f"Using instruction: '{prompt}' for task: '{task_metadata.name}' prompt type: '{prompt_type}'"
+            )
+
+        prompts = [prompt + text for batch in inputs for text in batch["text"]]
+        outputs = self.llm.encode(
+            prompts, pooling_task="embed", truncate_prompt_tokens=-1
+        )
+        embeddings = torch.stack([output.outputs.data for output in outputs])
+        return embeddings
+
+
+class VllmCrossEncoderWrapper(VllmWrapperBase):
+    """vLLM wrapper for CrossEncoder models."""
+
+    convert = "classify"
+
+    def __init__(
+        self,
+        model: str | ModelMeta,
+        revision: str | None = None,
+        query_prefix: str = "",
+        document_prefix: str = "",
+        **kwargs: Any,
+    ):
+        super().__init__(
+            model,
+            revision,
+            **kwargs,
+        )
+        self.query_prefix = query_prefix
+        self.document_prefix = document_prefix
+
+    def predict(
+        self,
+        inputs1: DataLoader[BatchedInput],
+        inputs2: DataLoader[BatchedInput],
+        *,
+        task_metadata: TaskMetadata,
+        hf_split: str,
+        hf_subset: str,
+        prompt_type: PromptType | None = None,
+        **kwargs: Any,
+    ) -> Array:
+        """Predicts relevance scores for pairs of inputs. Note that, unlike the encoder, the cross-encoder can compare across inputs.
+
+        Args:
+            inputs1: First Dataloader of inputs to encode. For reranking tasks, these are queries (for text only tasks `QueryDatasetType`).
+            inputs2: Second Dataloader of inputs to encode. For reranking, these are documents (for text only tasks `RetrievalOutputType`).
+            task_metadata: Metadata of the current task.
+            hf_split: Split of current task, allows to know some additional information about current split.
+                E.g. Current language
+            hf_subset: Subset of current task. Similar to `hf_split` to get more information.
+            prompt_type: The type of prompt (query or passage).
+            **kwargs: Additional arguments to pass to the cross-encoder.
+
+        Returns:
+            The predicted relevance scores for each input pair.
+        """
+        queries = [
+            self.query_prefix + text for batch in inputs1 for text in batch["text"]
+        ]
+        corpus = [
+            self.document_prefix + text for batch in inputs2 for text in batch["text"]
+        ]
+        # TODO: support score prompt
+
+        outputs = self.llm.score(
+            queries,
+            corpus,
+            truncate_prompt_tokens=-1,
+            use_tqdm=False,
+        )
+        scores = np.array([output.outputs.score for output in outputs])
+        return scores
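
The new mteb/models/vllm_wrapper.py module above adds VllmWrapperBase plus encoder and cross-encoder wrappers that run pooling models through vLLM's offline LLM engine. A minimal construction sketch based only on the signatures shown above; the checkpoints, instruction template, and prompt dictionary are illustrative placeholders, and the optional dependency comes from the new vllm extra added in this release:

# Requires the optional extra added in this release: pip install "mteb[vllm]"
from mteb.models.vllm_wrapper import VllmCrossEncoderWrapper, VllmEncoderWrapper

# Embedding model served via vLLM's pooling runner (convert="embed").
encoder = VllmEncoderWrapper(
    model="intfloat/e5-mistral-7b-instruct",  # placeholder HF checkpoint, not an mteb default
    use_instructions=True,
    instruction_template="Instruct: {instruction}\nQuery: ",  # must contain "{instruction}"
    prompt_dict={"STS12": "Retrieve semantically similar text."},  # hypothetical task -> instruction map
    max_num_seqs=64,
    gpu_memory_utilization=0.8,
)

# Reranker scored through LLM.score (convert="classify").
reranker = VllmCrossEncoderWrapper(
    model="BAAI/bge-reranker-v2-m3",  # placeholder reranker checkpoint
    query_prefix="query: ",      # illustrative prefixes; the defaults are empty strings
    document_prefix="document: ",
)

Both wrappers register an atexit cleanup hook that tears down vLLM's distributed state and frees GPU memory, so they can be constructed and discarded inside longer evaluation scripts.
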
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mteb
-Version: 2.6.8
+Version: 2.7.0
 Summary: Massive Text Embedding Benchmark
 Author-email: MTEB Contributors <niklas@huggingface.co>, Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Nouamane Tazi <nouamane@huggingface.co>, Nils Reimers <info@nils-reimers.de>
 Maintainer-email: Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Roman Solomatin <risolomatin@gmail.com>, Isaac Chung <chungisaac1217@gmail.com>
@@ -32,6 +32,8 @@ Requires-Dist: rich>=0.0.0
 Requires-Dist: pytrec-eval-terrier>=0.5.6
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: polars>=0.20.22
+Requires-Dist: torch; python_full_version < "3.14"
+Requires-Dist: torch>=2.9.0; python_full_version >= "3.14"
 Provides-Extra: image
 Requires-Dist: torchvision>0.2.1; extra == "image"
 Requires-Dist: transformers[torch-vision,vision]; extra == "image"
@@ -108,6 +110,8 @@ Provides-Extra: faiss-cpu
 Requires-Dist: faiss-cpu>=1.12.0; extra == "faiss-cpu"
 Provides-Extra: eager-embed
 Requires-Dist: qwen_vl_utils>=0.0.14; extra == "eager-embed"
+Provides-Extra: vllm
+Requires-Dist: vllm>=0.11.1; extra == "vllm"
 Dynamic: license-file
 
 <h1 align="center">
@@ -1463,6 +1463,7 @@ mteb/models/model_meta.py,sha256=x8EuA8Zpc4DqhK_50v5TAZ7n2J2yhHqf5U0ldCpPnw0,311
 mteb/models/models_protocols.py,sha256=5WYOZw3-T-wK7ux0YZVCfbcMTkAisqAqbu44ZNoir4A,9250
 mteb/models/search_wrappers.py,sha256=9lXLXUyL6atMCwXp-HBUong6msT3UAUY9QI7zKXbSVU,20945
 mteb/models/sentence_transformer_wrapper.py,sha256=3sAev15a07ULA_ikFBBsMta0toy9AGPBbBPi37j_K-A,12932
+mteb/models/vllm_wrapper.py,sha256=ebX4JIKPoxW4PIlr3BnaoLGuMGRpHzBe_ZwvFscx1D0,12286
 mteb/models/cache_wrappers/__init__.py,sha256=1w1TnMwulWJSzNkLXjbh5MY3sqgHWc6vUntYn49i9X8,169
 mteb/models/cache_wrappers/cache_backend_protocol.py,sha256=iGWdqDEoaCxUVEnwsXhy-m9d2QX8KTaQ9m2ZyawrMes,1634
 mteb/models/cache_wrappers/cache_wrapper.py,sha256=GPC0UhHfkUH-i-Q4HdFvBev6h6GtMlWEId_B3tL-J54,6600
@@ -1544,8 +1545,8 @@ mteb/models/model_implementations/nbailab.py,sha256=LM00HJIr4yrA45qh2O21BIDXku9K
 mteb/models/model_implementations/no_instruct_sentence_models.py,sha256=qLiMok_OxKvIYXWnP0KNWqH1monZx-OdSZrSx3QEhtI,4049
 mteb/models/model_implementations/nomic_models.py,sha256=dmQC_cWg6hAmiBHK7fXoXEiGBJnJvrq0RsnCcJ2qe1Q,15137
 mteb/models/model_implementations/nomic_models_vision.py,sha256=usCKfZCR7aEi_DnNmVAYjH-lXx_ipQkBVtUAmhJ90QI,6870
-mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py,sha256=6dTGtK1GiaYdpJ4IQFgCCOkGyHQyuEUatKs-Uv-1YmE,6450
-mteb/models/model_implementations/nvidia_models.py,sha256=JMy0x7EWGrAxZ9s63F2vSPdPS-9yF3RIS4uj3N2UrVI,24315
+mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py,sha256=DR66nmrIw1dgq7I1AcdgSC-ZqE29dsszVnHsrMxyCT4,6450
+mteb/models/model_implementations/nvidia_models.py,sha256=XnNiyByz6EFrISz1Msb-cXLVQfKnYP5HMRzAXC1KPDo,24937
 mteb/models/model_implementations/octen_models.py,sha256=FFR1-yG2euN-6kgL4qJNHYB6cPsds4NGYFPmc5tHhoE,8514
 mteb/models/model_implementations/openai_models.py,sha256=905BajYi_XyOZgqU3AeKpwIttLoUitaAyc48sTWI6Jg,9482
 mteb/models/model_implementations/openclip_models.py,sha256=MyosgeYSrgBXGuGFtI2Tyxksxpb7bADFJVSYFCLweVA,11622
@@ -1591,7 +1592,7 @@ mteb/models/model_implementations/vdr_models.py,sha256=8jlfABvO7Z9ebzAPFHqln3B2I
 mteb/models/model_implementations/vi_vn_models.py,sha256=Ep2zj4Xvjyu0a_YiLsYvolKdMGSOtzm-N-yNyXmfNwA,6328
 mteb/models/model_implementations/vista_models.py,sha256=GkQFHIwwjxwM0wDuo-dWJBo4dLExlHtHfXwhcdKA5uQ,10884
 mteb/models/model_implementations/vlm2vec_models.py,sha256=EeWl3kpS_1VDJs4t1QmpaWSuglLPL2GyZu27fVY1VT8,11802
-mteb/models/model_implementations/voyage_models.py,sha256=5A5RD2A6B20qLDVEpWL0TNMQOf5hnTVXdBugdh5q4d0,20214
+mteb/models/model_implementations/voyage_models.py,sha256=BdAHT7tpLVu9CMDdX9LzJKAJ6CncZKYIfMk2XdNKxV8,21707
 mteb/models/model_implementations/voyage_v.py,sha256=eFdSOKka5VoLjViZk5umlgTw_ETjyXv4yhZ9SoCR-p0,8124
 mteb/models/model_implementations/xyz_models.py,sha256=gjwCx3U4AxMcJDTSWVoYV6xeyXLw7lUZI5D6Q7JjWho,1322
 mteb/models/model_implementations/youtu_models.py,sha256=THwWRabutW-qC-JZOVhxXWjKHVyMElzt_xm81ixzN50,5995
@@ -2617,9 +2618,9 @@ mteb/types/_metadata.py,sha256=NN-W0S6a5TDV7UkpRx1pyWtGF4TyyCyoPUfHOwdeci8,2290
 mteb/types/_result.py,sha256=UKNokV9pu3G74MGebocU512aU_fFU9I9nPKnrG9Q0iE,1035
 mteb/types/_string_validators.py,sha256=PY-dYq4E8O50VS3bLYdldPWp400fl_WzUjfVSkNWe8U,523
 mteb/types/statistics.py,sha256=GwkBPmAr18Onu-vHtzHs0PFrhCozdOMiT13HwnWL4ZM,3961
-mteb-2.6.8.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-mteb-2.6.8.dist-info/METADATA,sha256=WuENjBwmboXEdotPTAcW0lgGdPfZVWxLbOem6RkweA4,14281
-mteb-2.6.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-mteb-2.6.8.dist-info/entry_points.txt,sha256=8IJoEJFKoDHmVnNev-qJ9pp4Ln7_1-ma9QsXnzVCzGU,39
-mteb-2.6.8.dist-info/top_level.txt,sha256=OLVIjcQAlWBz0bdmutKlWHLF42FF0hp4uVAg3ZyiG4U,5
-mteb-2.6.8.dist-info/RECORD,,
+mteb-2.7.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mteb-2.7.0.dist-info/METADATA,sha256=ww41PdZGaQnKWIX3vetD7jRnSf7O36TDKY7OSFGa1aE,14457
+mteb-2.7.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mteb-2.7.0.dist-info/entry_points.txt,sha256=8IJoEJFKoDHmVnNev-qJ9pp4Ln7_1-ma9QsXnzVCzGU,39
+mteb-2.7.0.dist-info/top_level.txt,sha256=OLVIjcQAlWBz0bdmutKlWHLF42FF0hp4uVAg3ZyiG4U,5
+mteb-2.7.0.dist-info/RECORD,,