mteb 2.6.2__py3-none-any.whl → 2.6.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mteb/models/get_model_meta.py +6 -2
- mteb/models/instruct_wrapper.py +17 -3
- mteb/models/model_implementations/bmretriever_models.py +2 -0
- mteb/models/model_implementations/cde_models.py +9 -2
- mteb/models/model_implementations/e5_v.py +5 -5
- mteb/models/model_implementations/jina_models.py +6 -5
- mteb/models/model_implementations/no_instruct_sentence_models.py +1 -1
- mteb/models/model_implementations/nomic_models.py +4 -1
- mteb/models/model_implementations/nvidia_models.py +2 -1
- mteb/models/model_meta.py +3 -1
- mteb/models/models_protocols.py +16 -2
- mteb/models/sentence_transformer_wrapper.py +10 -4
- {mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/METADATA +1 -1
- {mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/RECORD +18 -18
- {mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/WHEEL +0 -0
- {mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/entry_points.txt +0 -0
- {mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/licenses/LICENSE +0 -0
- {mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/top_level.txt +0 -0
mteb/models/get_model_meta.py
CHANGED
@@ -82,7 +82,10 @@ def get_model_metas(


 def get_model(
-    model_name: str,
+    model_name: str,
+    revision: str | None = None,
+    device: str | None = None,
+    **kwargs: Any,
 ) -> MTEBModels:
     """A function to fetch and load model object by name.

@@ -92,13 +95,14 @@ def get_model(
     Args:
         model_name: Name of the model to fetch
         revision: Revision of the model to fetch
+        device: Device used to load the model
         **kwargs: Additional keyword arguments to pass to the model loader

     Returns:
         A model object
     """
     meta = get_model_meta(model_name, revision)
-    model = meta.load_model(**kwargs)
+    model = meta.load_model(device=device, **kwargs)

     if kwargs:
         logger.info(
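Taken together, the hunks above mean the device can now be chosen at load time through the public entry point. A minimal usage sketch, assuming a model that is registered with mteb (the model name below is only an illustrative placeholder):

import mteb

# `device` is forwarded from get_model to ModelMeta.load_model and on to the wrapper.
model = mteb.get_model(
    "sentence-transformers/all-MiniLM-L6-v2",  # illustrative model name
    device="cuda",  # e.g. "cpu", "cuda", "mps"; omitting it keeps the previous behaviour
)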
mteb/models/instruct_wrapper.py
CHANGED
@@ -18,6 +18,7 @@ def instruct_wrapper(
     model_name_or_path: str,
     mode: str,
     instruction_template: str | Callable[[str, PromptType | None], str] | None = None,
+    device: str | None = None,
     **kwargs,
 ):
     """Instruct wrapper for models. Uses GritLM to pass instructions to the model.
@@ -28,6 +29,7 @@ def instruct_wrapper(
         model_name_or_path: Model name or path.
         mode: Mode of the model. Either 'query' or 'passage'.
         instruction_template: Instruction template. Should contain the string '{instruction}'.
+        device: Device used to load the model.
         **kwargs: Additional arguments to pass to the model.
     """
     requires_package(
@@ -40,6 +42,7 @@ def instruct_wrapper(
             self,
             model_name_or_path: str,
             mode: str,
+            device: str | None = None,
             instruction_template: str
             | Callable[[str, PromptType | None], str]
             | None = None,
@@ -63,7 +66,12 @@ def instruct_wrapper(
             )

             self.instruction_template = instruction_template
-            super().__init__(
+            super().__init__(
+                model_name_or_path=model_name_or_path,
+                mode=mode,
+                device=device,
+                **kwargs,
+            )

         def encode(
             self,
@@ -95,7 +103,9 @@ def instruct_wrapper(
             embeddings = embeddings.cpu().detach().float().numpy()
             return embeddings

-    return InstructGritLMModel(
+    return InstructGritLMModel(
+        model_name_or_path, mode, instruction_template=instruction_template, **kwargs
+    )


 class InstructSentenceTransformerModel(AbsEncoder):
@@ -105,6 +115,7 @@ class InstructSentenceTransformerModel(AbsEncoder):
         self,
         model_name: str,
         revision: str,
+        device: str | None = None,
         instruction_template: str
         | Callable[[str, PromptType | None], str]
         | None = None,
@@ -122,6 +133,7 @@ class InstructSentenceTransformerModel(AbsEncoder):
         Arguments:
             model_name: Model name of the sentence transformers model.
             revision: Revision of the sentence transformers model.
+            device: Device used to load the model.
             instruction_template: Model template. Should contain the string '{instruction}'.
             max_seq_length: Maximum sequence length. If None, the maximum sequence length will be read from the model config.
             apply_instruction_to_passages: Whether to apply the instruction template to the passages.
@@ -158,7 +170,9 @@ class InstructSentenceTransformerModel(AbsEncoder):
         kwargs.setdefault("tokenizer_kwargs", {}).update(tokenizer_params)

         self.model_name = model_name
-        self.model = SentenceTransformer(
+        self.model = SentenceTransformer(
+            model_name, revision=revision, device=device, **kwargs
+        )
         if max_seq_length:
             # https://github.com/huggingface/sentence-transformers/issues/3575
             self.model.max_seq_length = max_seq_length
mteb/models/model_implementations/bmretriever_models.py
CHANGED
@@ -25,6 +25,7 @@ class BMRetrieverWrapper(InstructSentenceTransformerModel):
         self,
         model_name: str,
         revision: str,
+        device: str | None = None,
         instruction_template: str
         | Callable[[str, PromptType | None], str]
         | None = None,
@@ -52,6 +53,7 @@ class BMRetrieverWrapper(InstructSentenceTransformerModel):

         transformer = Transformer(
             model_name,
+            device=device,
             **kwargs,
         )
         pooling = Pooling(
mteb/models/model_implementations/cde_models.py
CHANGED
@@ -49,10 +49,17 @@ class CDEWrapper(SentenceTransformerEncoderWrapper):
         "InstructionReranking",
     )

-    def __init__(
+    def __init__(
+        self,
+        model: str,
+        revision: str | None = None,
+        device: str | None = None,
+        *args,
+        **kwargs: Any,
+    ) -> None:
         from transformers import AutoConfig

-        super().__init__(model, *args, **kwargs)
+        super().__init__(model, revision=revision, device=device, *args, **kwargs)
         model_config = AutoConfig.from_pretrained(model, trust_remote_code=True)
         self.max_sentences = model_config.transductive_corpus_size

mteb/models/model_implementations/e5_v.py
CHANGED
@@ -30,6 +30,7 @@ class E5VModel(AbsEncoder):
         self,
         model_name: str,
         revision: str,
+        device: str | None = None,
         composed_prompt=None,
         **kwargs: Any,
     ):
@@ -47,8 +48,7 @@ class E5VModel(AbsEncoder):
         self.processor = LlavaNextProcessor.from_pretrained(
             model_name, revision=revision
         )
-
-        self.device = kwargs.pop("device")
+        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
         self.model = LlavaNextForConditionalGeneration.from_pretrained(
             model_name, revision=revision, **kwargs
         )
@@ -87,7 +87,7 @@
             ],
             return_tensors="pt",
             padding=True,
-        ).to(
+        ).to(self.device)
         text_outputs = self.model(
             **text_inputs, output_hidden_states=True, return_dict=True
         ).hidden_states[-1][:, -1, :]
@@ -111,7 +111,7 @@
             batch["image"],
             return_tensors="pt",
             padding=True,
-        ).to(
+        ).to(self.device)
         image_outputs = self.model(
             **img_inputs, output_hidden_states=True, return_dict=True
         ).hidden_states[-1][:, -1, :]
@@ -141,7 +141,7 @@
             ]
             inputs = self.processor(
                 prompts, batch["image"], return_tensors="pt", padding=True
-            ).to(
+            ).to(self.device)
             outputs = self.model(
                 **inputs, output_hidden_states=True, return_dict=True
             ).hidden_states[-1][:, -1, :]
mteb/models/model_implementations/jina_models.py
CHANGED
@@ -257,6 +257,7 @@ class JinaRerankerV3Wrapper(CrossEncoderWrapper):
         self,
         model: CrossEncoder | str,
         revision: str | None = None,
+        device: str | None = None,
         trust_remote_code: bool = True,
         **kwargs: Any,
     ) -> None:
@@ -267,10 +268,7 @@
             model, trust_remote_code=trust_remote_code, dtype="auto"
         )

-        device =
-        if device is None:
-            device = get_device_name()
-            logger.info(f"Use pytorch device: {device}")
+        device = device or get_device_name()

         self.model.to(device)
         self.model.eval()
@@ -320,6 +318,7 @@ class JinaWrapper(SentenceTransformerEncoderWrapper):
         self,
         model: str,
         revision: str,
+        device: str | None = None,
         model_prompts: dict[str, str] | None = None,
         **kwargs,
     ) -> None:
@@ -339,7 +338,9 @@
         )
         import flash_attn  # noqa: F401

-        super().__init__(
+        super().__init__(
+            model, revision, device=device, model_prompts=model_prompts, **kwargs
+        )

     def encode(
         self,
mteb/models/model_implementations/no_instruct_sentence_models.py
CHANGED
@@ -30,13 +30,13 @@ class NoInstructModel(AbsEncoder):
         self,
         model_name: str,
         revision: str,
+        device: str | None = None,
         model_prompts: dict[str, str] | None = None,
         **kwargs: Any,
     ):
         from transformers import AutoModel, AutoTokenizer

         self.model_name = model_name
-        device = kwargs.pop("device", None)
         self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
         self.model = AutoModel.from_pretrained(
             model_name, revision=revision, **kwargs
mteb/models/model_implementations/nomic_models.py
CHANGED
@@ -23,6 +23,7 @@ class NomicWrapper(SentenceTransformerEncoderWrapper):
         self,
         model_name: str,
         revision: str,
+        device: str | None = None,
         model_prompts: dict[str, str] | None = None,
         **kwargs: Any,
     ):
@@ -37,7 +38,9 @@
                 f"Current transformers version is {transformers.__version__} is lower than the required version"
                 f" {MODERN_BERT_TRANSFORMERS_MIN_VERSION}"
             )
-        super().__init__(
+        super().__init__(
+            model_name, revision, device=device, model_prompts=model_prompts, **kwargs
+        )

     def to(self, device: torch.device) -> None:
         self.model.to(device)
mteb/models/model_implementations/nvidia_models.py
CHANGED
@@ -337,6 +337,7 @@ class LlamaEmbedNemotron(AbsEncoder):
         self,
         model_name: str,
         revision: str,
+        device: str | None = None,
     ) -> None:
         required_transformers_version = "4.51.0"
         if Version(transformers_version) != Version(required_transformers_version):
@@ -355,7 +356,7 @@
         self.attn_implementation = (
             "flash_attention_2" if torch.cuda.is_available() else "eager"
         )
-        self.device =
+        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
         self.task_prompts = TASK_PROMPTS
         self.instruction_template = self._instruction_template

mteb/models/model_meta.py
CHANGED
@@ -251,7 +251,7 @@ class ModelMeta(BaseModel):
         )
         return v

-    def load_model(self, **kwargs: Any) -> MTEBModels:
+    def load_model(self, device: str | None = None, **kwargs: Any) -> MTEBModels:
         """Loads the model using the specified loader function."""
         if self.loader is None:
             raise NotImplementedError(
@@ -263,6 +263,8 @@
         # Allow overwrites
         _kwargs = self.loader_kwargs.copy()
         _kwargs.update(kwargs)
+        if device is not None:
+            _kwargs["device"] = device

         model: MTEBModels = self.loader(self.name, revision=self.revision, **_kwargs)
         model.mteb_model_meta = self  # type: ignore[misc]
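The new branch means an explicit device argument overrides any "device" entry already present in loader_kwargs. A minimal sketch of that call path, assuming the model name is registered with mteb (it is only an illustrative choice):

import mteb

meta = mteb.get_model_meta("intfloat/multilingual-e5-small")  # illustrative model name
# device=... is injected into the loader kwargs (see the hunk above) before the loader runs.
model = meta.load_model(device="cpu")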
mteb/models/models_protocols.py
CHANGED
@@ -83,12 +83,19 @@ class EncoderProtocol(Protocol):
    In general the interface is kept aligned with sentence-transformers interface. In cases where exceptions occurs these are handled within MTEB.
    """

-    def __init__(
+    def __init__(
+        self,
+        model_name: str,
+        revision: str | None,
+        device: str | None = None,
+        **kwargs: Any,
+    ) -> None:
         """The initialization function for the encoder. Used when calling it from the mteb run CLI.

         Args:
             model_name: Name of the model
             revision: revision of the model
+            device: Device used to load the model
             kwargs: Any additional kwargs
         """
         ...
@@ -181,12 +188,19 @@ class CrossEncoderProtocol(Protocol):
    In general the interface is kept aligned with sentence-transformers interface. In cases where exceptions occurs these are handled within MTEB.
    """

-    def __init__(
+    def __init__(
+        self,
+        model_name: str,
+        revision: str | None,
+        device: str | None = None,
+        **kwargs: Any,
+    ) -> None:
         """The initialization function for the encoder. Used when calling it from the mteb run CLI.

         Args:
             model_name: Name of the model
             revision: revision of the model
+            device: Device used to load the model
             kwargs: Any additional kwargs
         """
         ...
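For third-party implementations, the protocol change only adds device to the constructor signature. A minimal sketch of a conforming constructor, assuming a custom encoder class (the class below is illustrative and omits the encode method the protocol also requires):

from __future__ import annotations

from typing import Any


class MyEncoder:
    """Illustrative skeleton showing only the updated EncoderProtocol constructor."""

    def __init__(
        self,
        model_name: str,
        revision: str | None,
        device: str | None = None,
        **kwargs: Any,
    ) -> None:
        self.model_name = model_name
        self.revision = revision
        # Same fallback several wrappers in this diff use when no device is given.
        self.device = device or "cpu"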
mteb/models/sentence_transformer_wrapper.py
CHANGED
@@ -26,17 +26,18 @@ SENTENCE_TRANSFORMERS_QUERY_ENCODE_VERSION = "5.0.0"


 def sentence_transformers_loader(
-    model_name: str, revision: str | None = None, **kwargs
+    model_name: str, revision: str | None = None, device: str | None = None, **kwargs
 ) -> SentenceTransformerEncoderWrapper:
     """Loads a SentenceTransformer model and wraps it in a SentenceTransformerEncoderWrapper.

     Args:
         model_name: The name of the SentenceTransformer model to load.
         revision: The revision of the model to load.
+        device: The device used to load the model.
         kwargs: Additional arguments to pass to the SentenceTransformer model.
     """
     return SentenceTransformerEncoderWrapper(
-        model=model_name, revision=revision, **kwargs
+        model=model_name, revision=revision, device=device, **kwargs
     )


@@ -49,6 +50,7 @@ class SentenceTransformerEncoderWrapper(AbsEncoder):
         self,
         model: str | SentenceTransformer,
         revision: str | None = None,
+        device: str | None = None,
         model_prompts: dict[str, str] | None = None,
         **kwargs,
     ) -> None:
@@ -57,6 +59,7 @@
         Args:
             model: The SentenceTransformer model to use. Can be a string (model name), a SentenceTransformer model, or a CrossEncoder model.
             revision: The revision of the model to use.
+            device: The device used to load the model.
             model_prompts: A dictionary mapping task names to prompt names.
                 First priority is given to the composed prompt of task name + prompt type (query or passage), then to the specific task prompt,
                 then to the composed prompt of task type + prompt type, then to the specific task type prompt,
@@ -66,7 +69,9 @@
         from sentence_transformers import SentenceTransformer

         if isinstance(model, str):
-            self.model = SentenceTransformer(
+            self.model = SentenceTransformer(
+                model, revision=revision, device=device, **kwargs
+            )
         else:
             self.model = model

@@ -266,6 +271,7 @@ class CrossEncoderWrapper:
         self,
         model: CrossEncoder | str,
         revision: str | None = None,
+        device: str | None = None,
         **kwargs,
     ) -> None:
         from sentence_transformers import CrossEncoder
@@ -273,7 +279,7 @@
         if isinstance(model, CrossEncoder):
             self.model = model
         elif isinstance(model, str):
-            self.model = CrossEncoder(model, revision=revision, **kwargs)
+            self.model = CrossEncoder(model, revision=revision, device=device, **kwargs)

         self.mteb_model_meta = ModelMeta.from_cross_encoder(self.model)

{mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mteb
-Version: 2.6.2
+Version: 2.6.4
 Summary: Massive Text Embedding Benchmark
 Author-email: MTEB Contributors <niklas@huggingface.co>, Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Nouamane Tazi <nouamane@huggingface.co>, Nils Reimers <info@nils-reimers.de>
 Maintainer-email: Kenneth Enevoldsen <kenneth.enevoldsen@cas.au.dk>, Roman Solomatin <risolomatin@gmail.com>, Isaac Chung <chungisaac1217@gmail.com>
{mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/RECORD
CHANGED
@@ -1446,12 +1446,12 @@ mteb/leaderboard/table.py,sha256=KqU8aAbZ_tDp1O_qXRGWR32QnB7v_lsF6k5jxLcQVN0,103
 mteb/leaderboard/text_segments.py,sha256=iMIkS04QQjPbT-SkU0x6fOcS8xRbUYevryu9HydipKM,6570
 mteb/models/__init__.py,sha256=ABTuoqiBjBtBWW3LYY7ItBHdylR6jWoy06HH0g6j6fU,910
 mteb/models/abs_encoder.py,sha256=HSJTjvcPYJRsKhhZeK2r6YP241EqpovwBcAuX1NevKE,16553
-mteb/models/get_model_meta.py,sha256=
-mteb/models/instruct_wrapper.py,sha256=
-mteb/models/model_meta.py,sha256=
-mteb/models/models_protocols.py,sha256=
+mteb/models/get_model_meta.py,sha256=wVh2FaWevJ10hJlbm-FQtTQazLMfnkEV3IK7PUyBPOQ,6082
+mteb/models/instruct_wrapper.py,sha256=PjgDKFnc160QP9jcPkxdI3OtcljyUdapuOcKZNGkNHo,9661
+mteb/models/model_meta.py,sha256=TrcE_Wwf_jQaKFU6K1oypEnEIBgdhHLDYLmvFwc2Xcg,30494
+mteb/models/models_protocols.py,sha256=LvHS14Rv22AsfY-391yau_cPAQwoKXRsvyYWCBy6VVQ,9165
 mteb/models/search_wrappers.py,sha256=yu3BnXLqE5JbOD14cF2mhyjvlF5LRKPfgk8uUuDhbjI,20939
-mteb/models/sentence_transformer_wrapper.py,sha256=
+mteb/models/sentence_transformer_wrapper.py,sha256=KLleEFx31773zPT-5mqHGBOT5Km6fVkwwxtMYuepeZY,12829
 mteb/models/cache_wrappers/__init__.py,sha256=1w1TnMwulWJSzNkLXjbh5MY3sqgHWc6vUntYn49i9X8,169
 mteb/models/cache_wrappers/cache_backend_protocol.py,sha256=iGWdqDEoaCxUVEnwsXhy-m9d2QX8KTaQ9m2ZyawrMes,1634
 mteb/models/cache_wrappers/cache_wrapper.py,sha256=GPC0UhHfkUH-i-Q4HdFvBev6h6GtMlWEId_B3tL-J54,6600
@@ -1472,9 +1472,9 @@ mteb/models/model_implementations/bica_model.py,sha256=Q2dg0w_lrcBhnOUjI4ej9ec9U
 mteb/models/model_implementations/blip2_models.py,sha256=F55NYHrK-rprWblIfkKg3GRsOuTqBNZlOY1R33UnLms,7687
 mteb/models/model_implementations/blip_models.py,sha256=LZrk5tn_9gokuZTfuv-DasJqx3UTgZsAEFmlJpQ-9xc,11596
 mteb/models/model_implementations/bm25.py,sha256=nSDtTXu5a5EkjuaF6V4iParwpxlnXKVNDFntp6uj1Q8,4846
-mteb/models/model_implementations/bmretriever_models.py,sha256=
+mteb/models/model_implementations/bmretriever_models.py,sha256=Z4lbE0ggAp9dnHKZCrFAuClM5_ie_wirfwhU9R_ddiA,6721
 mteb/models/model_implementations/cadet_models.py,sha256=wzbPmhsvBogFAEukubUir8EItlcmjcmfIGNMhtj-p7Y,2251
-mteb/models/model_implementations/cde_models.py,sha256=
+mteb/models/model_implementations/cde_models.py,sha256=u3G-BEWFpL1tsiONs3iaz9BJ_IEcNN0366fCQMMWr2A,9209
 mteb/models/model_implementations/clip_models.py,sha256=snF74_5ISfrRYJwB4yHslO5SEF1cXYa6XIlNaplEqX0,6137
 mteb/models/model_implementations/clips_models.py,sha256=QV9fIoyP2dKrra9aS04TE6rveUecVggr3jfXwNeSAOw,3488
 mteb/models/model_implementations/codefuse_models.py,sha256=NXkFqb1Pdp-HLWkzhh0ZzjVxd45fP0cQgGZ1KvXBk_s,14053
@@ -1488,7 +1488,7 @@ mteb/models/model_implementations/conan_models.py,sha256=AJJ8_Mv4QR1kQoKamjoZqgj
 mteb/models/model_implementations/dino_models.py,sha256=P2f_iOFYK4bdDDiYmNgmtWFBaQbyE-0DHUdBAeMI2LE,25429
 mteb/models/model_implementations/e5_instruct.py,sha256=6bQLMC8Nea59qSu8RSqZp9n8XuQokBJHoxfZb2l6BQM,7780
 mteb/models/model_implementations/e5_models.py,sha256=18--kpfMSKxgflGjB3GvyDHOjzOpuooc3iSVe-no2U0,9607
-mteb/models/model_implementations/e5_v.py,sha256=
+mteb/models/model_implementations/e5_v.py,sha256=_EGSMU38BrqshKUqZfjDlXvUfrUXMZXiybQP_xpSSOQ,6769
 mteb/models/model_implementations/eagerworks_models.py,sha256=7bSInJGHOUqc9N-yzq0KUAtJZDX0zZkmEkzbCG_Pz0c,5770
 mteb/models/model_implementations/emillykkejensen_models.py,sha256=8TY70wiyDfjqN3BdAD9DJMnIXObTczCRYk4hYWmQOjE,3695
 mteb/models/model_implementations/en_code_retriever.py,sha256=6sSJ7l8Zrf71fYlcGaWAdF0vcZ9OAFeC1IsVtM2W_i8,1069
@@ -1508,7 +1508,7 @@ mteb/models/model_implementations/ibm_granite_models.py,sha256=--8N-8Nk2V5TZqGUA
 mteb/models/model_implementations/inf_models.py,sha256=IBC3TaEkOxrUDXkhXaVnxerjWOZZv1v1eEqhweGWKMY,2958
 mteb/models/model_implementations/jasper_models.py,sha256=K2DC0JfMVG8Fa822-xemKNhtuL2fZgiKYTTpXp2yBGg,16263
 mteb/models/model_implementations/jina_clip.py,sha256=xV1R5xyHqZHyzlpx7O0Pg1SwTagGEwt_kw3wWoshgNM,5128
-mteb/models/model_implementations/jina_models.py,sha256=
+mteb/models/model_implementations/jina_models.py,sha256=gdTGC2abKhne2nTbfX1K4S-xr3MlFJT99Iu5ynIFI7w,35004
 mteb/models/model_implementations/kalm_models.py,sha256=SHqkw5p7HzmQrb_bIFjRp1rsuv2v531nXIk390h_ojY,62115
 mteb/models/model_implementations/kblab.py,sha256=n6sMGorSIBQlRHipPC3j2UiKA3r7avriwPvw0wuQKe4,1161
 mteb/models/model_implementations/kennethenevoldsen_models.py,sha256=KvOhXDuhCtsTBGHg3ukCrQ45oz_hFylH7XjX3yjg1Ys,3013
@@ -1530,11 +1530,11 @@ mteb/models/model_implementations/model2vec_models.py,sha256=scVmIw-kBysX_kiQ8j8
 mteb/models/model_implementations/moka_models.py,sha256=xY3geXKZwefqVsDZq95AB75GlZpvA9mJKSyPMvb75Us,5073
 mteb/models/model_implementations/mxbai_models.py,sha256=YcgOdcx_vv5UpPi7k7PBuq_M0eqCaktfWfQV5NTlNoc,3929
 mteb/models/model_implementations/nbailab.py,sha256=DtfHjQgGX1YPnlceqZDqDr6IlFwKCJjWN-BEcNt5m-s,2474
-mteb/models/model_implementations/no_instruct_sentence_models.py,sha256=
-mteb/models/model_implementations/nomic_models.py,sha256=
+mteb/models/model_implementations/no_instruct_sentence_models.py,sha256=t0CtgIiZHq4KgkPYUbxHQnxnaOaPYqpatCYa5Na-SSs,3993
+mteb/models/model_implementations/nomic_models.py,sha256=n5_0QCqZYq3Pzdjf9hxx15XswjmZZgWpq0GOVN9sWiQ,14895
 mteb/models/model_implementations/nomic_models_vision.py,sha256=9AQRJkPkFDPjuSqdIh8wJ0-pqS2fe_oDZzPR4Y0tOSg,6831
 mteb/models/model_implementations/nvidia_llama_nemoretriever_colemb.py,sha256=phbwPnRfrEuJTlrUucI1qxcViMQWogeXQkTZbUkNsQc,6388
-mteb/models/model_implementations/nvidia_models.py,sha256=
+mteb/models/model_implementations/nvidia_models.py,sha256=82hVLFRhABDVtVu_1Y3R_3IxX9ETtYJpnw4AeYBOiiM,21666
 mteb/models/model_implementations/octen_models.py,sha256=v4Mk6qMkK6yoMS5QomZdDWCP7ysB7CYf3VxuuOYVpu4,7481
 mteb/models/model_implementations/openai_models.py,sha256=905BajYi_XyOZgqU3AeKpwIttLoUitaAyc48sTWI6Jg,9482
 mteb/models/model_implementations/openclip_models.py,sha256=aFBWqHkWjHm8OfCB8RTNiaO03oaILAE2jVLR1VFZgPk,11532
@@ -2603,9 +2603,9 @@ mteb/types/_metadata.py,sha256=NN-W0S6a5TDV7UkpRx1pyWtGF4TyyCyoPUfHOwdeci8,2290
 mteb/types/_result.py,sha256=UKNokV9pu3G74MGebocU512aU_fFU9I9nPKnrG9Q0iE,1035
 mteb/types/_string_validators.py,sha256=PY-dYq4E8O50VS3bLYdldPWp400fl_WzUjfVSkNWe8U,523
 mteb/types/statistics.py,sha256=GwkBPmAr18Onu-vHtzHs0PFrhCozdOMiT13HwnWL4ZM,3961
-mteb-2.6.
-mteb-2.6.
-mteb-2.6.
-mteb-2.6.
-mteb-2.6.
-mteb-2.6.
+mteb-2.6.4.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mteb-2.6.4.dist-info/METADATA,sha256=YiucWKiIeOdDqvQNLIgVffTBWr3mQJWub7t86-2UPmA,14251
+mteb-2.6.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mteb-2.6.4.dist-info/entry_points.txt,sha256=8IJoEJFKoDHmVnNev-qJ9pp4Ln7_1-ma9QsXnzVCzGU,39
+mteb-2.6.4.dist-info/top_level.txt,sha256=OLVIjcQAlWBz0bdmutKlWHLF42FF0hp4uVAg3ZyiG4U,5
+mteb-2.6.4.dist-info/RECORD,,
{mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/WHEEL
File without changes
{mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/entry_points.txt
File without changes
{mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/licenses/LICENSE
File without changes
{mteb-2.6.2.dist-info → mteb-2.6.4.dist-info}/top_level.txt
File without changes