EuroEval 15.2.0 (euroeval-15.2.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of EuroEval might be problematic.

Files changed (40)
  1. euroeval/__init__.py +72 -0
  2. euroeval/benchmark_config_factory.py +358 -0
  3. euroeval/benchmark_modules/__init__.py +7 -0
  4. euroeval/benchmark_modules/base.py +354 -0
  5. euroeval/benchmark_modules/fresh.py +286 -0
  6. euroeval/benchmark_modules/hf.py +1185 -0
  7. euroeval/benchmark_modules/litellm.py +905 -0
  8. euroeval/benchmark_modules/vllm.py +1171 -0
  9. euroeval/benchmarker.py +1074 -0
  10. euroeval/callbacks.py +72 -0
  11. euroeval/cli.py +281 -0
  12. euroeval/constants.py +50 -0
  13. euroeval/data_loading.py +96 -0
  14. euroeval/data_models.py +474 -0
  15. euroeval/dataset_configs.py +2001 -0
  16. euroeval/enums.py +144 -0
  17. euroeval/exceptions.py +191 -0
  18. euroeval/finetuning.py +324 -0
  19. euroeval/generation.py +296 -0
  20. euroeval/human_evaluation.py +737 -0
  21. euroeval/languages.py +200 -0
  22. euroeval/model_cache.py +253 -0
  23. euroeval/model_config.py +77 -0
  24. euroeval/model_loading.py +78 -0
  25. euroeval/scores.py +90 -0
  26. euroeval/speed_benchmark.py +124 -0
  27. euroeval/task_utils/__init__.py +1 -0
  28. euroeval/task_utils/multiple_choice_classification.py +176 -0
  29. euroeval/task_utils/question_answering.py +698 -0
  30. euroeval/task_utils/sequence_classification.py +237 -0
  31. euroeval/task_utils/text_to_text.py +150 -0
  32. euroeval/task_utils/token_classification.py +464 -0
  33. euroeval/tasks.py +202 -0
  34. euroeval/types.py +97 -0
  35. euroeval/utils.py +574 -0
  36. euroeval-15.2.0.dist-info/METADATA +234 -0
  37. euroeval-15.2.0.dist-info/RECORD +40 -0
  38. euroeval-15.2.0.dist-info/WHEEL +4 -0
  39. euroeval-15.2.0.dist-info/entry_points.txt +4 -0
  40. euroeval-15.2.0.dist-info/licenses/LICENSE +21 -0
euroeval/scores.py ADDED
@@ -0,0 +1,90 @@
+ """Aggregation of raw scores into the mean and a confidence interval."""
+
+ import logging
+ import typing as t
+ import warnings
+
+ import numpy as np
+
+ if t.TYPE_CHECKING:
+     from .data_models import MetricConfig
+     from .types import ScoreDict
+
+ logger = logging.getLogger("euroeval")
+
+
+ def log_scores(
+     dataset_name: str,
+     metric_configs: list["MetricConfig"],
+     scores: list[dict[str, float]],
+     model_id: str,
+ ) -> "ScoreDict":
+     """Log the scores.
+
+     Args:
+         dataset_name:
+             Name of the dataset.
+         metric_configs:
+             List of metrics to log.
+         scores:
+             The scores that are to be logged. This is a list of dictionaries full of
+             scores.
+         model_id:
+             The full Hugging Face Hub path to the pretrained transformer model.
+
+     Returns:
+         A dictionary with keys 'raw' and 'total', with 'raw' being identical to
+         `scores` and 'total' being a dictionary with the aggregated scores (means
+         and standard errors).
+     """
+     logger.info(f"Finished evaluation of {model_id} on {dataset_name}.")
+
+     total_dict: dict[str, float] = dict()
+     for metric_cfg in metric_configs:
+         test_score, test_se = aggregate_scores(scores=scores, metric_config=metric_cfg)
+         test_score, test_score_str = metric_cfg.postprocessing_fn(test_score)
+         test_se, test_se_str = metric_cfg.postprocessing_fn(test_se)
+         total_dict[f"test_{metric_cfg.name}"] = test_score
+         total_dict[f"test_{metric_cfg.name}_se"] = test_se
+         logger.info(f"{metric_cfg.pretty_name}: {test_score_str} ± {test_se_str}")
+
+     return dict(raw=scores, total=total_dict)
+
+
+ def aggregate_scores(
+     scores: list[dict[str, float]], metric_config: "MetricConfig"
+ ) -> tuple[float, float]:
+     """Helper function to compute the mean with confidence intervals.
+
+     Args:
+         scores:
+             A list of dictionaries with the names of the metrics as keys, of the
+             form "<split>_<metric_name>", such as "val_f1", and the metric values
+             as values.
+         metric_config:
+             The configuration of the metric, which is used to collect the correct
+             metric from `scores`.
+
+     Returns:
+         A pair of floats, containing the score and the radius of its 95% confidence
+         interval.
+     """
+     with warnings.catch_warnings():
+         warnings.simplefilter("ignore")
+
+         test_scores = [
+             (
+                 dct[metric_config.name]
+                 if metric_config.name in dct
+                 else dct[f"test_{metric_config.name}"]
+             )
+             for dct in scores
+         ]
+         test_score = np.mean(test_scores).item()
+
+         if len(test_scores) > 1:
+             sample_std = np.std(test_scores, ddof=1)
+             test_se = sample_std / np.sqrt(len(test_scores))
+         else:
+             test_se = np.nan
+
+         return (test_score, 1.96 * test_se)
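
For reference, here is a minimal sketch (not part of the package) of the aggregation that aggregate_scores performs, assuming three iterations of a hypothetical "f1" metric: the mean of the raw scores is reported together with 1.96 standard errors as the radius of a 95% confidence interval.

import numpy as np

# Hypothetical raw scores from three evaluation iterations.
scores = [{"test_f1": 0.81}, {"test_f1": 0.84}, {"test_f1": 0.79}]
values = [dct["test_f1"] for dct in scores]

mean = float(np.mean(values))
se = float(np.std(values, ddof=1) / np.sqrt(len(values)))  # standard error of the mean
print(f"f1: {mean:.4f} ± {1.96 * se:.4f}")  # prints "f1: 0.8133 ± 0.0285"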
euroeval/speed_benchmark.py ADDED
@@ -0,0 +1,124 @@
+ """Benchmarking model inference speed."""
+
+ import logging
+
+ import pyinfer
+ from tqdm.auto import tqdm
+ from transformers import AutoTokenizer
+
+ from .benchmark_modules import (
+     BenchmarkModule,
+     HuggingFaceEncoderModel,
+     LiteLLMModel,
+     VLLMModel,
+ )
+ from .data_models import BenchmarkConfig
+ from .exceptions import InvalidBenchmark
+ from .utils import clear_memory
+
+ logger = logging.getLogger("euroeval")
+
+
+ def benchmark_speed(
+     model: "BenchmarkModule", benchmark_config: "BenchmarkConfig"
+ ) -> list[dict[str, float]]:
+     """Benchmark model inference speed.
+
+     Args:
+         model:
+             Model to use.
+         benchmark_config:
+             Configuration for the benchmark.
+
+     Returns:
+         A list of dictionaries of scores, one for each iteration.
+     """
+     scores: list[dict[str, float]] = list()
+     for idx in tqdm(
+         iterable=range(benchmark_config.num_iterations),
+         desc="Benchmarking",
+         disable=not benchmark_config.progress_bar,
+     ):
+         itr_scores = benchmark_speed_single_iteration(model=model, itr_idx=idx)
+         clear_memory()
+         scores.append(itr_scores)
+         logger.debug(f"Scores for iteration {idx}: {itr_scores}")
+     return scores
+
+
+ def benchmark_speed_single_iteration(
+     model: "BenchmarkModule", itr_idx: int
+ ) -> dict[str, float]:
+     """Run a single iteration of the speed benchmark.
+
+     Args:
+         model:
+             The model to use in the benchmark.
+         itr_idx:
+             The index of the iteration.
+
+     Returns:
+         A dictionary containing the scores for the current iteration.
+     """
+     gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2", trust_remote_code=True)
+
+     base_doc = "Document which contains roughly 10 tokens. "
+     multiplier = 10 * (1 + itr_idx)
+     doc = base_doc * multiplier
+     short_multiplier = 1.25 * (1 + itr_idx)
+     short_doc = base_doc * round(short_multiplier)
+
+     def generate_messages_predict(doc: str) -> None:
+         model.generate(inputs=dict(messages=[[dict(role="user", content=doc)]]))
+
+     def generate_prompt_predict(doc: str) -> None:
+         model.generate(inputs=dict(text=[doc]))
+
+     def encoder_predict(doc: str) -> None:
+         tokenizer = model.get_tokenizer()
+         pytorch_model = model.get_pytorch_module()
+         inputs = {
+             key: tensor.to(pytorch_model.device)
+             for key, tensor in tokenizer(
+                 text=[doc], truncation=True, return_tensors="pt"
+             ).items()
+         }
+         pytorch_model(**inputs)
+
+     if isinstance(model, VLLMModel):
+         predict = generate_prompt_predict
+     elif isinstance(model, LiteLLMModel):
+         predict = generate_messages_predict
+     elif isinstance(model, HuggingFaceEncoderModel):
+         predict = encoder_predict
+     else:
+         raise ValueError(f"Model type {model} not supported for speed benchmark")
+
+     try:
+         # Do a warmup run, as the first run is always slower
+         pyinfer.InferenceReport(model=predict, inputs=base_doc, n_seconds=1).run(
+             print_report=False
+         )
+
+         speed_scores = pyinfer.InferenceReport(
+             model=predict, inputs=doc, n_seconds=3
+         ).run(print_report=False)
+         num_gpt2_tokens = len(gpt2_tokenizer([doc], truncation=True)["input_ids"][0])
+         gpt2_tokens_per_second = speed_scores["Infer(p/sec)"] * num_gpt2_tokens
+
+         speed_scores_short = pyinfer.InferenceReport(
+             model=predict, inputs=short_doc, n_seconds=3
+         ).run(print_report=False)
+         num_gpt2_tokens_short = len(
+             gpt2_tokenizer([short_doc], truncation=True)["input_ids"][0]
+         )
+         gpt2_tokens_per_second_short = (
+             speed_scores_short["Infer(p/sec)"] * num_gpt2_tokens_short
+         )
+
+     except (RuntimeError, ValueError, IndexError) as e:
+         raise InvalidBenchmark(f"Speed benchmark failed with error: {e!r}")
+
+     return dict(
+         test_speed=gpt2_tokens_per_second, test_speed_short=gpt2_tokens_per_second_short
+     )
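
For reference, a minimal sketch (not part of the package) of how the reported speed score is derived: pyinfer measures how many calls to the predict function complete per second, and the module scales that rate by the GPT-2 token count of the benchmarked document to obtain tokens per second. The figures below are assumed purely for illustration.

# Assumed measurements, for illustration only.
inferences_per_second = 4.0   # what pyinfer reports as "Infer(p/sec)"
num_gpt2_tokens = 110         # GPT-2 token count of the benchmarked document

test_speed = inferences_per_second * num_gpt2_tokens
print(test_speed)  # 440.0 GPT-2 tokens processed per second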
euroeval/task_utils/__init__.py ADDED
@@ -0,0 +1 @@
+ """Utility functions related to the different tasks and task groups."""
euroeval/task_utils/multiple_choice_classification.py ADDED
@@ -0,0 +1,176 @@
+ """Utility functions related to the multiple-choice classification task group."""
+
+ import hashlib
+ import logging
+ import re
+ import typing as t
+ from collections import defaultdict
+
+ import numpy as np
+ from datasets import Dataset
+ from transformers import BatchEncoding, PreTrainedTokenizer, Trainer
+
+ if t.TYPE_CHECKING:
+     from ..types import Labels, Predictions
+
+ logger = logging.getLogger("euroeval")
+
+
+ class MultipleChoiceClassificationTrainer(Trainer):
+     """Trainer subclass for multiple-choice classification tasks."""
+
+     def evaluate(
+         self,
+         eval_dataset: "Dataset | None" = None,
+         ignore_keys: list[str] | None = None,
+         metric_key_prefix: str = "eval",
+     ) -> dict[str, float] | None:
+         """Evaluate the model on the given dataset.
+
+         Args:
+             eval_dataset:
+                 The dataset to evaluate on. If None, then use the stored evaluation
+                 dataset.
+             ignore_keys:
+                 The keys to ignore when computing the metrics.
+             metric_key_prefix:
+                 The prefix to use for the metric keys.
+
+         Returns:
+             The metrics computed on the evaluation dataset.
+         """
+         eval_dataloader = self.get_eval_dataloader(eval_dataset)
+
+         eval_loop = (
+             self.prediction_loop
+             if self.args.use_legacy_prediction_loop
+             else self.evaluation_loop
+         )
+         output = eval_loop(
+             eval_dataloader,
+             description="Evaluation",
+             prediction_loss_only=None,
+             ignore_keys=ignore_keys,
+             metric_key_prefix=metric_key_prefix,
+         )
+
+         if metric_key_prefix == "test":
+             preds_and_labels = postprocess_predictions_and_labels(
+                 predictions=output.predictions, dataset=eval_dataset
+             )
+             output.metrics.update(self.compute_metrics(preds_and_labels))
+
+             # Prefix all keys with metric_key_prefix + '_'
+             for key in list(output.metrics.keys()):
+                 if not key.startswith(f"{metric_key_prefix}_"):
+                     output.metrics[f"{metric_key_prefix}_{key}"] = output.metrics.pop(
+                         key
+                     )
+
+         # Only the main node logs the results by default
+         if self.args.should_log:
+             self.log(output.metrics)
+
+         self.control = self.callback_handler.on_evaluate(
+             self.args,
+             self.state,
+             self.control,  # type: ignore[has-type]
+             output.metrics,
+         )
+         return output.metrics
+
+
+ def prepare_examples(
+     examples: "BatchEncoding", tokenizer: "PreTrainedTokenizer"
+ ) -> "BatchEncoding":
+     """Prepare the features.
+
+     Args:
+         examples:
+             The examples to prepare.
+         tokenizer:
+             The tokenizer to use to prepare the examples.
+
+     Returns:
+         The prepared examples.
+     """
+     doc: str = examples["text"][0]
+     sections = doc.split("\n")
+
+     choice_idxs = [
+         idx
+         for idx, section in enumerate(sections)
+         if re.match(pattern=r"^[a-e]\. ", string=section) is not None
+     ]
+     choices = [sections[idx] for idx in choice_idxs]
+
+     # Check that the choices are present, and that all of them are at the end
+     assert len(choices) > 0, "No choices found in the document."
+     assert all(
+         choice_idx == len(sections) - i
+         for i, choice_idx in enumerate(sorted(choice_idxs, reverse=True), start=1)
+     ), "Choices are not at the end of the document."
+
+     question_idx = min(choice_idxs) - 2  # -2 to remove the 'Choices:' line
+     context_and_question = "\n".join(sections[: question_idx + 1]).strip()
+
+     new_examples = tokenizer(
+         text=[context_and_question] * len(choices),
+         text_pair=[choice[3:] for choice in choices],
+         padding=True,
+         truncation=True,
+     )
+     new_examples["label"] = [
+         int(choice.startswith(f"{letter}. ") and letter == examples["label"][0])
+         for letter, choice in zip("abcde", choices)
+     ]
+     new_examples["id"] = [hashlib.md5(string=doc.encode()).hexdigest()] * len(choices)
+     return new_examples
+
+
+ def postprocess_predictions_and_labels(
+     predictions: np.ndarray, dataset: "Dataset"
+ ) -> tuple["Predictions", "Labels"]:
+     """Postprocess the predictions and labels.
+
+     Args:
+         predictions:
+             The model predictions, of shape (num_examples, 2).
+         dataset:
+             The dataset containing the examples.
+
+     Returns:
+         The postprocessed predictions and labels.
+     """
+     mapping = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e"}
+
+     all_predictions: list[str] = list()
+     all_labels: list[str] = list()
+
+     pred_label_dict = defaultdict(list)
+     for pred_arr, example in zip(predictions, dataset):
+         pred_label_dict[example["id"]].append((pred_arr[1], example["label"]))
+
+     # Compute the final predictions and labels
+     for id_ in set(dataset["id"]):
+         preds, labels = zip(*pred_label_dict[id_])
+
+         # Some IDs appear multiple times in the dataset, since we are bootstrapping.
+         # Here we separate them into their respective groups.
+         assert len(labels) % sum(labels) == 0, (
+             "The number of labels is not divisible by the sum of the labels."
+         )
+         group_size = len(labels) // sum(labels)
+         preds_groups = [
+             preds[i : i + group_size] for i in range(0, len(preds), group_size)
+         ]
+         labels_groups = [
+             labels[i : i + group_size] for i in range(0, len(labels), group_size)
+         ]
+         for preds_group, labels_group in zip(preds_groups, labels_groups):
+             prediction: str = mapping[np.argmax(preds_group).item()]
+             label: str = mapping[np.argmax(labels_group).item()]
+             all_predictions.append(prediction)
+             all_labels.append(label)
+
+     return all_predictions, all_labels
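
For reference, a minimal sketch (not part of the package) of the document format that prepare_examples expects and how it is split: the context and question come first, followed by a 'Choices:' line and one 'letter. text' line per option. The example document below is hypothetical.

import re

doc = (
    "The sky appears blue because of Rayleigh scattering.\n"
    "What causes the sky's colour?\n"
    "Choices:\n"
    "a. Rayleigh scattering\n"
    "b. Ozone absorption\n"
    "c. Water vapour"
)

sections = doc.split("\n")
choice_idxs = [i for i, s in enumerate(sections) if re.match(r"^[a-e]\. ", s)]
choices = [sections[i] for i in choice_idxs]

# Everything above the 'Choices:' line is paired against each choice text.
context_and_question = "\n".join(sections[: min(choice_idxs) - 1]).strip()
print(context_and_question)                # context plus question
print([choice[3:] for choice in choices])  # the three choice texts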