ScandEval 16.11.0__py3-none-any.whl → 16.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scandeval/__init__.py +0 -9
- scandeval/async_utils.py +46 -0
- scandeval/benchmark_config_factory.py +31 -2
- scandeval/benchmark_modules/fresh.py +2 -1
- scandeval/benchmark_modules/hf.py +76 -23
- scandeval/benchmark_modules/litellm.py +33 -15
- scandeval/benchmark_modules/vllm.py +97 -44
- scandeval/benchmarker.py +29 -33
- scandeval/cli.py +11 -0
- scandeval/constants.py +36 -2
- scandeval/custom_dataset_configs.py +152 -0
- scandeval/data_loading.py +87 -31
- scandeval/data_models.py +405 -224
- scandeval/dataset_configs/__init__.py +51 -25
- scandeval/dataset_configs/albanian.py +1 -1
- scandeval/dataset_configs/belarusian.py +47 -0
- scandeval/dataset_configs/bulgarian.py +1 -1
- scandeval/dataset_configs/catalan.py +1 -1
- scandeval/dataset_configs/croatian.py +1 -1
- scandeval/dataset_configs/danish.py +3 -2
- scandeval/dataset_configs/dutch.py +16 -5
- scandeval/dataset_configs/english.py +4 -3
- scandeval/dataset_configs/estonian.py +8 -7
- scandeval/dataset_configs/faroese.py +1 -1
- scandeval/dataset_configs/finnish.py +5 -4
- scandeval/dataset_configs/french.py +6 -5
- scandeval/dataset_configs/german.py +4 -3
- scandeval/dataset_configs/greek.py +1 -1
- scandeval/dataset_configs/hungarian.py +1 -1
- scandeval/dataset_configs/icelandic.py +4 -3
- scandeval/dataset_configs/italian.py +4 -3
- scandeval/dataset_configs/latvian.py +2 -2
- scandeval/dataset_configs/lithuanian.py +1 -1
- scandeval/dataset_configs/norwegian.py +6 -5
- scandeval/dataset_configs/polish.py +4 -3
- scandeval/dataset_configs/portuguese.py +5 -4
- scandeval/dataset_configs/romanian.py +2 -2
- scandeval/dataset_configs/serbian.py +1 -1
- scandeval/dataset_configs/slovene.py +1 -1
- scandeval/dataset_configs/spanish.py +4 -3
- scandeval/dataset_configs/swedish.py +4 -3
- scandeval/dataset_configs/ukrainian.py +1 -1
- scandeval/generation_utils.py +6 -6
- scandeval/metrics/__init__.py +1 -0
- scandeval/metrics/bias.py +237 -0
- scandeval/metrics/huggingface.py +2 -1
- scandeval/metrics/llm_as_a_judge.py +1 -1
- scandeval/metrics/pipeline.py +1 -1
- scandeval/model_cache.py +34 -4
- scandeval/prompt_templates/linguistic_acceptability.py +9 -0
- scandeval/prompt_templates/multiple_choice.py +9 -0
- scandeval/prompt_templates/named_entity_recognition.py +21 -0
- scandeval/prompt_templates/reading_comprehension.py +10 -0
- scandeval/prompt_templates/sentiment_classification.py +11 -0
- scandeval/string_utils.py +157 -0
- scandeval/task_group_utils/sequence_classification.py +2 -5
- scandeval/task_group_utils/token_classification.py +2 -4
- scandeval/tasks.py +22 -0
- scandeval/tokenisation_utils.py +12 -1
- scandeval/utils.py +13 -383
- scandeval-16.13.0.dist-info/METADATA +334 -0
- scandeval-16.13.0.dist-info/RECORD +94 -0
- scandeval-16.11.0.dist-info/METADATA +0 -649
- scandeval-16.11.0.dist-info/RECORD +0 -89
- {scandeval-16.11.0.dist-info → scandeval-16.13.0.dist-info}/WHEEL +0 -0
- {scandeval-16.11.0.dist-info → scandeval-16.13.0.dist-info}/entry_points.txt +0 -0
- {scandeval-16.11.0.dist-info → scandeval-16.13.0.dist-info}/licenses/LICENSE +0 -0
scandeval/string_utils.py
ADDED
@@ -0,0 +1,157 @@
+"""Utility functions related to string manipulation or structuring."""
+
+import collections.abc as c
+import logging
+import re
+import typing as t
+
+import demjson3
+import numpy as np
+
+from .exceptions import InvalidBenchmark, InvalidModel
+from .logging_utils import log
+
+if t.TYPE_CHECKING:
+    from .data_models import ModelIdComponents
+
+
+def scramble(text: str) -> str:
+    """Scramble a string in a bijective manner.
+
+    Args:
+        text:
+            The string to scramble.
+
+    Returns:
+        The scrambled string.
+    """
+    rng = np.random.default_rng(seed=4242)
+    permutation = rng.permutation(x=len(text))
+    scrambled = "".join(text[i] for i in permutation)
+    return scrambled
+
+
+def unscramble(scrambled_text: str) -> str:
+    """Unscramble a string in a bijective manner.
+
+    Args:
+        scrambled_text:
+            The scrambled string to unscramble.
+
+    Returns:
+        The unscrambled string.
+    """
+    rng = np.random.default_rng(seed=4242)
+    permutation = rng.permutation(x=len(scrambled_text))
+    inverse_permutation = np.argsort(permutation)
+    unscrambled = "".join(scrambled_text[i] for i in inverse_permutation)
+    return unscrambled
+
+
+def extract_json_dict_from_string(s: str) -> dict | None:
+    """Extract a JSON dictionary from a string.
+
+    Args:
+        s:
+            The string to extract the JSON dictionary from.
+
+    Returns:
+        The extracted JSON dictionary, or None if no JSON dictionary could be found.
+    """
+    json_regex = r"\{[^{}]*?\}"
+    if (json_match := re.search(pattern=json_regex, string=s, flags=re.DOTALL)) is None:
+        log(
+            "The model output does not contain any JSON dictionary, so cannot parse "
+            f"it. Skipping. Here is the output: {s!r}",
+            level=logging.DEBUG,
+        )
+        return None
+    json_string = json_match.group()
+    try:
+        json_output = demjson3.decode(txt=json_string)
+    except demjson3.JSONDecodeError:
+        log(
+            "The model output is not valid JSON, so cannot parse it. Skipping. "
+            f"Here is the output: {json_string!r}",
+            level=logging.DEBUG,
+        )
+        return None
+    if not isinstance(json_output, dict):
+        log(
+            "The model output is not a JSON dictionary, so cannot parse "
+            f"it. Skipping. Here is the output: {json_string!r}",
+            level=logging.DEBUG,
+        )
+        return None
+    elif not all(isinstance(key, str) for key in json_output.keys()):
+        log(
+            "The model output is not a JSON dictionary with string keys, "
+            "so cannot parse it. Skipping. Here is the output: "
+            f"{json_string!r}",
+            level=logging.DEBUG,
+        )
+        return None
+    return json_output
+
+
+def extract_multiple_choice_labels(
+    prompt: str, candidate_labels: c.Sequence[str]
+) -> c.Sequence[str]:
+    """Extract multiple choice labels from a prompt.
+
+    Args:
+        prompt:
+            The prompt to extract the labels from.
+        candidate_labels:
+            The candidate labels to look for in the prompt.
+
+    Returns:
+        The extracted labels.
+    """
+    sample_candidate_labels: list[str] = list()
+    for candidate_label in candidate_labels:
+        candidate_label_match = re.search(
+            pattern=rf"\b{candidate_label}\. ", string=prompt, flags=re.IGNORECASE
+        )
+        if candidate_label_match is not None:
+            sample_candidate_labels.append(candidate_label)
+    if not sample_candidate_labels:
+        raise InvalidBenchmark(
+            "Could not extract any candidate labels from the prompt. Please ensure "
+            "that the candidate labels are present in the prompt, each followed by a "
+            "dot and a space (e.g., 'a. '). The candidate labels are: "
+            f"{', '.join(candidate_labels)}. Here is the prompt: {prompt!r}"
+        )
+    return sample_candidate_labels
+
+
+def split_model_id(model_id: str) -> "ModelIdComponents":
+    """Split a model ID into its components.
+
+    Args:
+        model_id:
+            The model ID to split.
+
+    Returns:
+        The split model ID.
+
+    Raises:
+        If the model ID is not valid.
+    """
+    # Importing here to avoid circular imports
+    from .data_models import ModelIdComponents
+
+    # Attempt to extract the model ID, revision, and param using regex
+    model_id_match = re.match(pattern=r"^[^@#]+", string=model_id)
+    revision_match = re.search(pattern=r"@([^@#]+)", string=model_id)
+    param_match = re.search(pattern=r"#([^@#]+)", string=model_id)
+
+    # If we cannot extract the model ID, raise an error
+    if model_id_match is None:
+        raise InvalidModel(f"The model ID {model_id!r} is not valid.")
+    model_id = model_id_match.group()
+
+    # Extract the revision and param and return the result
+    revision = revision_match.group(1) if revision_match is not None else "main"
+    param = param_match.group(1) if param_match is not None else None
+    return ModelIdComponents(model_id=model_id, revision=revision, param=param)
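A quick, illustrative sketch of how these new helpers fit together (the model ID and the "#" parameter below are made up, and the attribute names simply follow the ModelIdComponents(...) call in the hunk above):

    from scandeval.string_utils import (
        extract_json_dict_from_string,
        scramble,
        split_model_id,
        unscramble,
    )

    # scramble/unscramble use the same fixed seed (4242), so the permutation is
    # reproducible and unscramble exactly inverts scramble.
    assert unscramble(scramble("hf_secret_token")) == "hf_secret_token"

    # demjson3 is more forgiving than json.loads, so slightly malformed model
    # output such as single-quoted keys should still yield a dictionary.
    print(extract_json_dict_from_string("Answer: {'label': 'positive'}"))

    # "@" selects a revision (default "main") and "#" an optional parameter.
    components = split_model_id("org/model@v1.0.0#quantised")
    print(components.model_id, components.revision, components.param)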
scandeval/task_group_utils/sequence_classification.py
CHANGED
@@ -10,12 +10,9 @@ import numpy as np

 from ..enums import TaskGroup
 from ..exceptions import InvalidBenchmark
+from ..string_utils import extract_multiple_choice_labels
 from ..types import Predictions
-from ..utils import (
-    extract_multiple_choice_labels,
-    log_once,
-    raise_if_model_output_contains_nan_values,
-)
+from ..utils import log_once, raise_if_model_output_contains_nan_values

 if t.TYPE_CHECKING:
     from datasets.arrow_dataset import Dataset
scandeval/task_group_utils/token_classification.py
CHANGED
@@ -9,10 +9,8 @@ import numpy as np

 from ..exceptions import InvalidBenchmark
 from ..logging_utils import log
-from ..utils import (
-    extract_json_dict_from_string,
-    raise_if_model_output_contains_nan_values,
-)
+from ..string_utils import extract_json_dict_from_string
+from ..utils import raise_if_model_output_contains_nan_values

 if t.TYPE_CHECKING:
     from datasets.arrow_dataset import Dataset
scandeval/tasks.py
CHANGED
@@ -153,6 +153,28 @@ EUROPEAN_VALUES = Task(
 )


+MCSTEREO = Task(
+    name="multiple-choice-stereotype-bias",
+    task_group=TaskGroup.MULTIPLE_CHOICE_CLASSIFICATION,
+    template_dict=MULTIPLE_CHOICE_TEMPLATES,
+    metrics=[
+        m.bias_adjusted_accuracy_ambig_metric,
+        m.bias_ambig_metric,
+        m.accuracy_ambig_metric,
+    ],
+    default_num_few_shot_examples=0,
+    default_max_generated_tokens=NUM_GENERATION_TOKENS_FOR_CLASSIFICATION,
+    default_labels=["a", "b", "c"],
+    default_allowed_model_types=[ModelType.GENERATIVE],
+    default_allowed_generative_types=[
+        GenerativeType.INSTRUCTION_TUNED,
+        GenerativeType.REASONING,
+    ],
+    requires_zero_shot=True,
+    uses_logprobs=True,
+)
+
+
 SPEED = Task(
     name="speed",
     task_group=TaskGroup.SPEED,
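The metric names on the new MCSTEREO task point at BBQ-style bias scoring over ambiguous contexts. As a rough sketch of what such metrics typically compute (this is an assumption for illustration only, not the actual code in scandeval/metrics/bias.py):

    def ambiguous_bias_score(
        n_biased: int, n_non_unknown: int, accuracy: float
    ) -> float:
        """Illustrative BBQ-style bias score on ambiguous examples.

        The bias direction among non-"unknown" answers is scaled by the error
        rate, so a model that always answers "unknown" (the correct choice in
        an ambiguous context) gets a bias score of zero.
        """
        if n_non_unknown == 0:
            return 0.0
        direction = 2 * (n_biased / n_non_unknown) - 1
        return (1 - accuracy) * direction

A bias-adjusted accuracy would then combine plain accuracy with some penalty for the magnitude of such a score; exactly how ScandEval combines them is defined in the new scandeval/metrics/bias.py module.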
scandeval/tokenisation_utils.py
CHANGED
@@ -6,6 +6,7 @@ import re
 import typing as t

 import torch
+from transformers import BatchEncoding

 from .constants import BOS_TOKENS, EOS_TOKENS, PAD_TOKENS
 from .enums import GenerativeType
@@ -340,7 +341,17 @@ def get_end_of_chat_token_ids(
         if "does not have a chat template" in str(e):
             return None
         raise e
-
+
+    assert isinstance(token_ids, (BatchEncoding, list)), (
+        f"Expected token_ids to be a BatchEncoding or list, but got {type(token_ids)}.",
+    )
+
+    if isinstance(token_ids, BatchEncoding):
+        token_ids = token_ids.input_ids
+
+    assert isinstance(token_ids, list), (
+        f"Expected token_ids to be a list, but got {type(token_ids)}.",
+    )

     for idx, token in enumerate(tokeniser.convert_ids_to_tokens(token_ids)):
         if "X" in token:
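For context on the new assertions: calling a Hugging Face tokeniser returns a BatchEncoding whose input_ids attribute holds the token IDs, whereas tokeniser.encode returns a plain list, and convert_ids_to_tokens expects the latter. A minimal sketch of the same normalisation (constructing the BatchEncoding directly so it runs without downloading a tokeniser; the ID values are arbitrary placeholders):

    from transformers import BatchEncoding

    token_ids = BatchEncoding(data={"input_ids": [101, 2023, 102]})

    # Same normalisation as in the hunk above: unwrap a BatchEncoding into a list.
    if isinstance(token_ids, BatchEncoding):
        token_ids = token_ids.input_ids

    assert isinstance(token_ids, list) and token_ids == [101, 2023, 102]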
scandeval/utils.py
CHANGED
@@ -1,22 +1,14 @@
 """Utility functions to be used in other scripts."""

-import asyncio
-import collections.abc as c
 import gc
-import importlib
-import importlib.metadata
-import importlib.util
 import logging
 import os
 import random
-import re
 import socket
 import sys
 import typing as t
 from pathlib import Path
-from types import ModuleType, TracebackType

-import demjson3
 import huggingface_hub as hf_hub
 import numpy as np
 import torch
@@ -24,48 +16,14 @@ from huggingface_hub.errors import LocalTokenNotFoundError
 from requests.exceptions import RequestException

 from .caching_utils import cache_arguments
-from .constants import
-from .exceptions import
-from .logging_utils import
+from .constants import LOCAL_MODELS_REQUIRED_FILES
+from .exceptions import InvalidModel, NaNValueInModelOutput
+from .logging_utils import log_once

 if t.TYPE_CHECKING:
-    from .data_models import ModelIdComponents
     from .types import Predictions


-def create_model_cache_dir(cache_dir: str, model_id: str) -> str:
-    """Create cache directory for a model.
-
-    Args:
-        cache_dir:
-            The cache directory.
-        model_id:
-            The model ID.
-
-    Returns:
-        The path to the cache directory.
-    """
-    # If the model ID is a path, we just use that as the cache dir
-    if Path(model_id).is_dir():
-        log_once(
-            f"Since the model {model_id!r} is a local model, we will use the model "
-            "directory directly as the model cache directory.",
-            level=logging.DEBUG,
-        )
-        return model_id
-
-    # Otherwise, we create a cache dir based on the model ID
-    model_cache_dir = Path(
-        cache_dir, "model_cache", model_id.replace("/", "--")
-    ).as_posix()
-    log_once(
-        f"Using the model cache directory {model_cache_dir!r} for the model "
-        f"{model_id!r}.",
-        level=logging.DEBUG,
-    )
-    return model_cache_dir
-
-
 def resolve_model_path(download_dir: str) -> str:
     """Resolve the path to the directory containing the model config files and weights.

@@ -107,16 +65,16 @@ def resolve_model_path(download_dir: str) -> str:
             f"at {model_path}"
         )

-    # Check that found_files contains at least
-
-    (file for file in found_files if file.name
+    # Check that found_files contains at least one of the required files
+    found_required_file = next(
+        (file for file in found_files if file.name in LOCAL_MODELS_REQUIRED_FILES), None
     )
-    if
+    if found_required_file is None:
         raise InvalidModel(
-            f"
-            f"at {model_path}"
+            f"At least one of the files {LOCAL_MODELS_REQUIRED_FILES} must be present "
+            f"for {model_id_path.strip('models--')} at {model_path}"
         )
-    model_path =
+    model_path = found_required_file.parent

     # As a precaution we also check that all of the files are in the same directory
     # if not we create a new dir with symlinks to all of the files from all snapshots
@@ -164,47 +122,6 @@ def enforce_reproducibility(seed: int = 4242) -> np.random.Generator:
     return rng


-def get_class_by_name(
-    class_name: str | c.Sequence[str], module_name: str
-) -> t.Type | None:
-    """Get a class by its name.
-
-    Args:
-        class_name:
-            The name of the class, written in kebab-case. The corresponding class name
-            must be the same, but written in PascalCase, and lying in a module with the
-            same name, but written in snake_case. If a list of strings is passed, the
-            first class that is found is returned.
-        module_name:
-            The name of the module where the class is located.
-
-    Returns:
-        The class. If the class is not found, None is returned.
-    """
-    if isinstance(class_name, str):
-        class_name = [class_name]
-
-    error_messages = list()
-    for name in class_name:
-        try:
-            module = importlib.import_module(name=module_name)
-            class_: t.Type = getattr(module, name)
-            return class_
-        except (ModuleNotFoundError, AttributeError) as e:
-            error_messages.append(str(e))
-
-    if error_messages:
-        errors = "\n- " + "\n- ".join(error_messages)
-        log(
-            f"Could not find the class with the name(s) {', '.join(class_name)}. The "
-            f"following error messages were raised: {errors}",
-            level=logging.DEBUG,
-        )
-
-    # If the class could not be found, return None
-    return None
-
-
 def get_min_cuda_compute_capability() -> float | None:
     """Gets the lowest cuda capability.

@@ -222,7 +139,7 @@ def get_min_cuda_compute_capability() -> float | None:

 @cache_arguments(disable_condition=lambda: hasattr(sys, "_called_from_test"))
 def internet_connection_available() -> bool:
-    """Checks if internet connection is available
+    """Checks if internet connection is available.

     Returns:
         Whether or not internet connection is available.
@@ -265,141 +182,6 @@ def raise_if_model_output_contains_nan_values(model_output: "Predictions") -> None
         raise NaNValueInModelOutput()


-def scramble(text: str) -> str:
-    """Scramble a string in a bijective manner.
-
-    Args:
-        text:
-            The string to scramble.
-
-    Returns:
-        The scrambled string.
-    """
-    rng = np.random.default_rng(seed=4242)
-    permutation = rng.permutation(x=len(text))
-    scrambled = "".join(text[i] for i in permutation)
-    return scrambled
-
-
-def unscramble(scrambled_text: str) -> str:
-    """Unscramble a string in a bijective manner.
-
-    Args:
-        scrambled_text:
-            The scrambled string to unscramble.
-
-    Returns:
-        The unscrambled string.
-    """
-    rng = np.random.default_rng(seed=4242)
-    permutation = rng.permutation(x=len(scrambled_text))
-    inverse_permutation = np.argsort(permutation)
-    unscrambled = "".join(scrambled_text[i] for i in inverse_permutation)
-    return unscrambled
-
-
-def get_package_version(package_name: str) -> str | None:
-    """Get the version of a package.
-
-    Args:
-        package_name:
-            The name of the package.
-
-    Returns:
-        The version of the package, or None if the package is not installed.
-    """
-    try:
-        return importlib.metadata.version(package_name)
-    except importlib.metadata.PackageNotFoundError:
-        return None
-
-
-def safe_run(coroutine: t.Coroutine[t.Any, t.Any, T]) -> T:
-    """Run a coroutine, ensuring that the event loop is always closed when we're done.
-
-    Args:
-        coroutine:
-            The coroutine to run.
-
-    Returns:
-        The result of the coroutine.
-    """
-    try:
-        loop = asyncio.get_event_loop()
-    except RuntimeError:  # If the current event loop is closed
-        loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(loop)
-    response = loop.run_until_complete(coroutine)
-    return response
-
-
-async def add_semaphore_and_catch_exception(
-    coroutine: t.Coroutine[t.Any, t.Any, T], semaphore: asyncio.Semaphore
-) -> T | Exception:
-    """Run a coroutine with a semaphore.
-
-    Args:
-        coroutine:
-            The coroutine to run.
-        semaphore:
-            The semaphore to use.
-
-    Returns:
-        The result of the coroutine.
-    """
-    async with semaphore:
-        try:
-            return await coroutine
-        except Exception as exc:
-            return exc
-
-
-def extract_json_dict_from_string(s: str) -> dict | None:
-    """Extract a JSON dictionary from a string.
-
-    Args:
-        s:
-            The string to extract the JSON dictionary from.
-
-    Returns:
-        The extracted JSON dictionary, or None if no JSON dictionary could be found.
-    """
-    json_regex = r"\{[^{}]*?\}"
-    if (json_match := re.search(pattern=json_regex, string=s, flags=re.DOTALL)) is None:
-        log(
-            "The model output does not contain any JSON dictionary, so cannot parse "
-            f"it. Skipping. Here is the output: {s!r}",
-            level=logging.DEBUG,
-        )
-        return None
-    json_string = json_match.group()
-    try:
-        json_output = demjson3.decode(txt=json_string)
-    except demjson3.JSONDecodeError:
-        log(
-            "The model output is not valid JSON, so cannot parse it. Skipping. "
-            f"Here is the output: {json_string!r}",
-            level=logging.DEBUG,
-        )
-        return None
-    if not isinstance(json_output, dict):
-        log(
-            "The model output is not a JSON dictionary, so cannot parse "
-            f"it. Skipping. Here is the output: {json_string!r}",
-            level=logging.DEBUG,
-        )
-        return None
-    elif not all(isinstance(key, str) for key in json_output.keys()):
-        log(
-            "The model output is not a JSON dictionary with string keys, "
-            "so cannot parse it. Skipping. Here is the output: "
-            f"{json_string!r}",
-            level=logging.DEBUG,
-        )
-        return None
-    return json_output
-
-
 @cache_arguments()
 def get_hf_token(api_key: str | None) -> str | bool:
     """Get the Hugging Face token.
@@ -419,10 +201,9 @@ def get_hf_token(api_key: str | None) -> str | bool:
             level=logging.DEBUG,
         )
         return api_key
-    elif (token := os.getenv("HUGGINGFACE_API_KEY")) is not None:
+    elif (token := os.getenv("HF_TOKEN")) is not None:
         log_once(
-            "Using the Hugging Face API key from the environment variable "
-            "`HUGGINGFACE_API_KEY`.",
+            "Using the Hugging Face API key from the environment variable `HF_TOKEN`.",
             level=logging.DEBUG,
        )
         return token
@@ -448,154 +229,3 @@ def get_hf_token(api_key: str | None) -> str | bool:
             level=logging.DEBUG,
         )
         return False
-
-
-def extract_multiple_choice_labels(
-    prompt: str, candidate_labels: c.Sequence[str]
-) -> c.Sequence[str]:
-    """Extract multiple choice labels from a prompt.
-
-    Args:
-        prompt:
-            The prompt to extract the labels from.
-        candidate_labels:
-            The candidate labels to look for in the prompt.
-
-    Returns:
-        The extracted labels.
-    """
-    sample_candidate_labels: list[str] = list()
-    for candidate_label in candidate_labels:
-        candidate_label_match = re.search(
-            pattern=rf"\b{candidate_label}\. ", string=prompt, flags=re.IGNORECASE
-        )
-        if candidate_label_match is not None:
-            sample_candidate_labels.append(candidate_label)
-    if not sample_candidate_labels:
-        raise InvalidBenchmark(
-            "Could not extract any candidate labels from the prompt. Please ensure "
-            "that the candidate labels are present in the prompt, each followed by a "
-            "dot and a space (e.g., 'a. '). The candidate labels are: "
-            f"{', '.join(candidate_labels)}. Here is the prompt: {prompt!r}"
-        )
-    return sample_candidate_labels
-
-
-def split_model_id(model_id: str) -> "ModelIdComponents":
-    """Split a model ID into its components.
-
-    Args:
-        model_id:
-            The model ID to split.
-
-    Returns:
-        The split model ID.
-
-    Raises:
-        If the model ID is not valid.
-    """
-    # Importing here to avoid circular imports
-    from .data_models import ModelIdComponents
-
-    # Attempt to extract the model ID, revision, and param using regex
-    model_id_match = re.match(pattern=r"^[^@#]+", string=model_id)
-    revision_match = re.search(pattern=r"@([^@#]+)", string=model_id)
-    param_match = re.search(pattern=r"#([^@#]+)", string=model_id)
-
-    # If we cannot extract the model ID, raise an error
-    if model_id_match is None:
-        raise InvalidModel(f"The model ID {model_id!r} is not valid.")
-    model_id = model_id_match.group()
-
-    # Extract the revision and param and return the result
-    revision = revision_match.group(1) if revision_match is not None else "main"
-    param = param_match.group(1) if param_match is not None else None
-    return ModelIdComponents(model_id=model_id, revision=revision, param=param)
-
-
-def load_custom_datasets_module(custom_datasets_file: Path) -> ModuleType | None:
-    """Load the custom datasets module if it exists.
-
-    Args:
-        custom_datasets_file:
-            The path to the custom datasets module.
-
-    Raises:
-        RuntimeError:
-            If the custom datasets module cannot be loaded.
-    """
-    if custom_datasets_file.exists():
-        spec = importlib.util.spec_from_file_location(
-            name="custom_datasets_module", location=str(custom_datasets_file.resolve())
-        )
-        if spec is None:
-            log_once(
-                "Could not load the spec for the custom datasets file from "
-                f"{custom_datasets_file.resolve()}.",
-                level=logging.ERROR,
-            )
-            return None
-        module = importlib.util.module_from_spec(spec=spec)
-        if spec.loader is None:
-            log_once(
-                "Could not load the module for the custom datasets file from "
-                f"{custom_datasets_file.resolve()}.",
-                level=logging.ERROR,
-            )
-            return None
-        spec.loader.exec_module(module)
-        return module
-    return None
-
-
-class attention_backend:
-    """Context manager to temporarily set the attention backend.
-
-    This sets the `VLLM_ATTENTION_BACKEND` environment variable to the desired value
-    for the duration of the context manager, and restores the previous value afterwards.
-    """
-
-    def __init__(self, value: str | None) -> None:
-        """Initialise the context manager.
-
-        Args:
-            value:
-                The name of the attention backend to set. If None then no change is
-                made. Also, if the user has already set the `VLLM_ATTENTION_BACKEND` env
-                var, then no change is made.
-        """
-        user_has_set_backend = (
-            os.environ.get("USER_HAS_SET_VLLM_ATTENTION_BACKEND", "0") == "1"
-        )
-        self.value = None if user_has_set_backend else value
-        self.previous_value: str | None = None
-
-    def __enter__(self) -> None:
-        """Enter the context manager."""
-        if self.value is None:
-            return
-        self.previous_value = os.getenv("VLLM_ATTENTION_BACKEND")
-        os.environ["VLLM_ATTENTION_BACKEND"] = self.value
-
-    def __exit__(
-        self,
-        exc_type: t.Type[BaseException] | None,
-        exc_value: BaseException | None,
-        exc_tb: TracebackType | None,
-    ) -> None:
-        """Exit the context manager.
-
-        Args:
-            exc_type:
-                The type of the exception.
-            exc_value:
-                The value of the exception.
-            exc_tb:
-                The traceback of the exception.
-        """
-        if self.value is None:
-            return
-        if self.previous_value is None:
-            os.environ.pop("VLLM_ATTENTION_BACKEND", None)
-        else:
-            os.environ["VLLM_ATTENTION_BACKEND"] = self.previous_value