ScandEval 16.11.0__py3-none-any.whl → 16.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. scandeval/__init__.py +0 -9
  2. scandeval/async_utils.py +46 -0
  3. scandeval/benchmark_config_factory.py +31 -2
  4. scandeval/benchmark_modules/fresh.py +2 -1
  5. scandeval/benchmark_modules/hf.py +76 -23
  6. scandeval/benchmark_modules/litellm.py +33 -15
  7. scandeval/benchmark_modules/vllm.py +97 -44
  8. scandeval/benchmarker.py +29 -33
  9. scandeval/cli.py +11 -0
  10. scandeval/constants.py +36 -2
  11. scandeval/custom_dataset_configs.py +152 -0
  12. scandeval/data_loading.py +87 -31
  13. scandeval/data_models.py +405 -224
  14. scandeval/dataset_configs/__init__.py +51 -25
  15. scandeval/dataset_configs/albanian.py +1 -1
  16. scandeval/dataset_configs/belarusian.py +47 -0
  17. scandeval/dataset_configs/bulgarian.py +1 -1
  18. scandeval/dataset_configs/catalan.py +1 -1
  19. scandeval/dataset_configs/croatian.py +1 -1
  20. scandeval/dataset_configs/danish.py +3 -2
  21. scandeval/dataset_configs/dutch.py +16 -5
  22. scandeval/dataset_configs/english.py +4 -3
  23. scandeval/dataset_configs/estonian.py +8 -7
  24. scandeval/dataset_configs/faroese.py +1 -1
  25. scandeval/dataset_configs/finnish.py +5 -4
  26. scandeval/dataset_configs/french.py +6 -5
  27. scandeval/dataset_configs/german.py +4 -3
  28. scandeval/dataset_configs/greek.py +1 -1
  29. scandeval/dataset_configs/hungarian.py +1 -1
  30. scandeval/dataset_configs/icelandic.py +4 -3
  31. scandeval/dataset_configs/italian.py +4 -3
  32. scandeval/dataset_configs/latvian.py +2 -2
  33. scandeval/dataset_configs/lithuanian.py +1 -1
  34. scandeval/dataset_configs/norwegian.py +6 -5
  35. scandeval/dataset_configs/polish.py +4 -3
  36. scandeval/dataset_configs/portuguese.py +5 -4
  37. scandeval/dataset_configs/romanian.py +2 -2
  38. scandeval/dataset_configs/serbian.py +1 -1
  39. scandeval/dataset_configs/slovene.py +1 -1
  40. scandeval/dataset_configs/spanish.py +4 -3
  41. scandeval/dataset_configs/swedish.py +4 -3
  42. scandeval/dataset_configs/ukrainian.py +1 -1
  43. scandeval/generation_utils.py +6 -6
  44. scandeval/metrics/__init__.py +1 -0
  45. scandeval/metrics/bias.py +237 -0
  46. scandeval/metrics/huggingface.py +2 -1
  47. scandeval/metrics/llm_as_a_judge.py +1 -1
  48. scandeval/metrics/pipeline.py +1 -1
  49. scandeval/model_cache.py +34 -4
  50. scandeval/prompt_templates/linguistic_acceptability.py +9 -0
  51. scandeval/prompt_templates/multiple_choice.py +9 -0
  52. scandeval/prompt_templates/named_entity_recognition.py +21 -0
  53. scandeval/prompt_templates/reading_comprehension.py +10 -0
  54. scandeval/prompt_templates/sentiment_classification.py +11 -0
  55. scandeval/string_utils.py +157 -0
  56. scandeval/task_group_utils/sequence_classification.py +2 -5
  57. scandeval/task_group_utils/token_classification.py +2 -4
  58. scandeval/tasks.py +22 -0
  59. scandeval/tokenisation_utils.py +12 -1
  60. scandeval/utils.py +13 -383
  61. scandeval-16.13.0.dist-info/METADATA +334 -0
  62. scandeval-16.13.0.dist-info/RECORD +94 -0
  63. scandeval-16.11.0.dist-info/METADATA +0 -649
  64. scandeval-16.11.0.dist-info/RECORD +0 -89
  65. {scandeval-16.11.0.dist-info → scandeval-16.13.0.dist-info}/WHEEL +0 -0
  66. {scandeval-16.11.0.dist-info → scandeval-16.13.0.dist-info}/entry_points.txt +0 -0
  67. {scandeval-16.11.0.dist-info → scandeval-16.13.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,649 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: ScandEval
3
- Version: 16.11.0
4
- Summary: The robust European language model benchmark.
5
- Project-URL: Repository, https://github.com/EuroEval/EuroEval
6
- Project-URL: Issues, https://github.com/EuroEval/EuroEval/issues
7
- Author-email: Dan Saattrup Smart <dan.smart@alexandra.dk>
8
- Maintainer-email: Dan Saattrup Smart <dan.smart@alexandra.dk>
9
- License: MIT License
10
-
11
- Copyright (c) 2022-2026 Dan Saattrup Smart
12
-
13
- Permission is hereby granted, free of charge, to any person obtaining a copy
14
- of this software and associated documentation files (the "Software"), to deal
15
- in the Software without restriction, including without limitation the rights
16
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17
- copies of the Software, and to permit persons to whom the Software is
18
- furnished to do so, subject to the following conditions:
19
-
20
- The above copyright notice and this permission notice shall be included in all
21
- copies or substantial portions of the Software.
22
-
23
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29
- SOFTWARE.
30
- License-File: LICENSE
31
- Requires-Python: <4.0,>=3.11
32
- Requires-Dist: accelerate>=1.9.0
33
- Requires-Dist: bert-score>=0.3.13
34
- Requires-Dist: click>=8.1.3
35
- Requires-Dist: cloudpickle>=3.1.1
36
- Requires-Dist: datasets>=3.5.0
37
- Requires-Dist: demjson3>=3.0.6
38
- Requires-Dist: evaluate>=0.4.1
39
- Requires-Dist: huggingface-hub>=0.30.1
40
- Requires-Dist: levenshtein>=0.24.0
41
- Requires-Dist: litellm>=1.75.6
42
- Requires-Dist: mistral-common[soundfile]
43
- Requires-Dist: more-itertools>=10.5.0
44
- Requires-Dist: numpy>=2.0.0
45
- Requires-Dist: ollama>=0.5.1
46
- Requires-Dist: pandas>=2.2.0
47
- Requires-Dist: peft>=0.15.0
48
- Requires-Dist: protobuf>=2.0.0
49
- Requires-Dist: pydantic>=2.6.0
50
- Requires-Dist: pyinfer>=0.0.3
51
- Requires-Dist: python-dotenv>=1.0.1
52
- Requires-Dist: rouge-score>=0.1.2
53
- Requires-Dist: sacrebleu>=2.5.1
54
- Requires-Dist: sacremoses>=0.1.1
55
- Requires-Dist: scikit-learn==1.6.1
56
- Requires-Dist: sentencepiece>=0.1.96
57
- Requires-Dist: seqeval>=1.2.2
58
- Requires-Dist: setuptools>=75.8.2
59
- Requires-Dist: tenacity>=9.0.0
60
- Requires-Dist: termcolor>=2.0.0
61
- Requires-Dist: torch>=2.6.0
62
- Requires-Dist: transformers[mistral-common]>=4.56.0
63
- Provides-Extra: all
64
- Requires-Dist: bitsandbytes>=0.43.1; (platform_system == 'Linux') and extra == 'all'
65
- Requires-Dist: fbgemm-gpu>=1.0.0; (platform_system == 'Linux') and extra == 'all'
66
- Requires-Dist: ray>=2.53.0; (platform_system == 'Linux') and extra == 'all'
67
- Requires-Dist: timm>=1.0.19; extra == 'all'
68
- Requires-Dist: vllm[flashinfer]==0.11.0; (platform_system == 'Linux') and extra == 'all'
69
- Provides-Extra: generative
70
- Requires-Dist: bitsandbytes>=0.43.1; (platform_system == 'Linux') and extra == 'generative'
71
- Requires-Dist: fbgemm-gpu>=1.0.0; (platform_system == 'Linux') and extra == 'generative'
72
- Requires-Dist: ray>=2.53.0; (platform_system == 'Linux') and extra == 'generative'
73
- Requires-Dist: timm>=1.0.19; extra == 'generative'
74
- Requires-Dist: vllm[flashinfer]==0.11.0; (platform_system == 'Linux') and extra == 'generative'
75
- Description-Content-Type: text/markdown
76
-
77
- <!-- This disables the requirement that the first line is a top-level heading -->
78
- <!-- markdownlint-configure-file { "MD041": false } -->
79
-
80
- <div align='center'>
81
- <img
82
- src="https://raw.githubusercontent.com/EuroEval/EuroEval/main/gfx/euroeval.png"
83
- height="500"
84
- width="372"
85
- >
86
- </div>
87
-
88
- ### The robust European language model benchmark
89
-
90
- (formerly known as ScandEval)
91
-
92
- ______________________________________________________________________
93
- [![Documentation](https://img.shields.io/badge/docs-passing-green)](https://euroeval.com)
94
- [![PyPI Status](https://badge.fury.io/py/euroeval.svg)](https://pypi.org/project/euroeval/)
95
- [![First paper](https://img.shields.io/badge/arXiv-2304.00906-b31b1b.svg)](https://arxiv.org/abs/2304.00906)
96
- [![Second paper](https://img.shields.io/badge/arXiv-2406.13469-b31b1b.svg)](https://arxiv.org/abs/2406.13469)
97
- [![License](https://img.shields.io/github/license/EuroEval/EuroEval)](https://github.com/EuroEval/EuroEval/blob/main/LICENSE)
98
- [![LastCommit](https://img.shields.io/github/last-commit/EuroEval/EuroEval)](https://github.com/EuroEval/EuroEval/commits/main)
99
- [![Code Coverage](https://img.shields.io/badge/Coverage-70%25-yellow.svg)](https://github.com/EuroEval/EuroEval/tree/main/tests)
100
- [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg)](https://github.com/EuroEval/EuroEval/blob/main/CODE_OF_CONDUCT.md)
101
-
102
- ## Maintainer
103
-
104
- - Dan Saattrup Smart ([@saattrupdan](https://github.com/saattrupdan), <dan.smart@alexandra.dk>)
105
-
106
- ## Installation
107
-
108
- To install the package, simply run the following command in your favorite terminal:
109
-
110
- ```bash
111
- pip install euroeval[all]
112
- ```
113
-
114
- This will install the EuroEval package with all extras. You can also install the
- minimal version by leaving out the `[all]`, in which case the package will let you know
- when an evaluation requires a certain extra dependency, and how to install it.
117
-
118
- ## Quickstart
119
-
120
- ### Benchmarking from the command line
121
-
122
- The easiest way to benchmark pretrained models is via the command line interface. After
- installing the package, you can benchmark your favorite model like so:
124
-
125
- ```bash
126
- euroeval --model <model-id-or-path>
127
- ```
128
-
129
- Here `model` is either the Hugging Face model ID, which can be found on the [Hugging
- Face Hub](https://huggingface.co/models), or a local path to a model directory
- (containing the model files as well as the `config.json` file). By default, this will
- benchmark the model on all available tasks. If you want to benchmark on a particular
- task, then use the `--task` argument:
134
-
135
- ```bash
136
- euroeval --model <model-id-or-path> --task sentiment-classification
137
- ```
138
-
139
- We can also narrow down which languages we would like to benchmark on. This can be done
140
- by setting the `--language` argument. Here we thus benchmark the model on the Danish
141
- sentiment classification task:
142
-
143
- ```bash
144
- euroeval --model <model-id-or-path> --task sentiment-classification --language da
145
- ```
146
-
147
- Multiple models, datasets and/or languages can be specified by simply repeating the
- corresponding arguments. Here is an example with two models:
149
-
150
- ```bash
151
- euroeval --model <model-id-or-path-1> --model <model-id-or-path-2>
152
- ```
153
-
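- The same pattern applies to languages and datasets; for instance, the following would
- benchmark a single model on both Danish and Swedish (shown purely as an illustration of
- repeating the `--language` argument):
-
- ```bash
- euroeval --model <model-id-or-path> --language da --language sv
- ```
-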
154
- The specific model version/revision to use can also be specified by appending '@' followed by the revision:
155
-
156
- ```bash
157
- euroeval --model <model-id-or-path>@<commit>
158
- ```
159
-
160
- This can be a branch name, a tag name, or a commit ID. It defaults to 'main', i.e., the latest version.
161
-
162
- See all the arguments and options available for the `euroeval` command by typing
163
-
164
- ```bash
165
- euroeval --help
166
- ```
167
-
168
- ### Benchmarking from a script
169
-
170
- In a script, the syntax is similar to the command line interface. You simply initialise
- an object of the `Benchmarker` class and call its `benchmark` method with your favorite
- model:
173
-
174
- ```python
175
- >>> from euroeval import Benchmarker
176
- >>> benchmarker = Benchmarker()
177
- >>> benchmarker.benchmark(model="<model-id-or-path>")
178
- ```
179
-
180
- To benchmark on a specific task and/or language, you simply specify the `task` or
- `language` arguments, shown here with the same example as above:
182
-
183
- ```python
184
- >>> benchmarker.benchmark(
185
- ... model="<model-id-or-path>",
186
- ... task="sentiment-classification",
187
- ... language="da",
188
- ... )
189
- ```
190
-
191
- If you want to benchmark a subset of all the models on the Hugging Face Hub, you can
192
- simply leave out the `model` argument. In this example, we're benchmarking all Danish
193
- models on the Danish sentiment classification task:
194
-
195
- ```python
196
- >>> benchmarker.benchmark(task="sentiment-classification", language="da")
197
- ```
198
-
199
- ### Benchmarking from Docker
200
-
201
- A Dockerfile is provided in the repo, which can be downloaded and run without needing
- to clone the repo or install from source. It can be fetched programmatically by
- running the following:
204
-
205
- ```bash
206
- wget https://raw.githubusercontent.com/EuroEval/EuroEval/main/Dockerfile.cuda
207
- ```
208
-
209
- Next, to be able to build the Docker image, first ensure that the NVIDIA Container
210
- Toolkit is
211
- [installed](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installation)
212
- and
213
- [configured](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#configuring-docker).
214
- Ensure that the CUDA version stated at the top of the Dockerfile matches the CUDA
215
- version installed (which you can check using `nvidia-smi`). After that, we build the
216
- image as follows:
217
-
218
- ```bash
219
- docker build --pull -t euroeval -f Dockerfile.cuda .
220
- ```
221
-
222
- With the Docker image built, we can now evaluate any model as follows:
223
-
224
- ```bash
225
- docker run -e args="<euroeval-arguments>" --gpus 1 --name euroeval --rm euroeval
226
- ```
227
-
228
- Here `<euroeval-arguments>` consists of the arguments passed to the `euroeval` CLI.
- This could for instance be `--model <model-id-or-path> --task
- sentiment-classification`.
231
-
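- Putting this together, a full invocation using those example arguments would look like
- this:
-
- ```bash
- docker run -e args="--model <model-id-or-path> --task sentiment-classification" --gpus 1 --name euroeval --rm euroeval
- ```
-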
232
- ## Benchmarking custom inference APIs
233
-
234
- If the model you want to benchmark is hosted by a custom inference provider, such as a
235
- [vLLM server](https://docs.vllm.ai/en/stable/), then this is also supported in EuroEval.
236
-
237
- When benchmarking, you simply have to set the `--api-base` argument (`api_base` when
238
- using the `Benchmarker` API) to the URL of the inference API, and optionally the
239
- `--api-key` argument (`api_key`) to the API key, if authentication is required.
240
-
241
- If you're benchmarking an Ollama model, then you're urged to add the prefix
- `ollama_chat/` to the model name, as that will fetch the model's metadata and pull
- the model from the Ollama model repository before evaluating it, e.g.:
244
-
245
- ```bash
246
- euroeval --model ollama_chat/mymodel --api-base http://localhost:11434
247
- ```
248
-
249
- For all other OpenAI-compatible inference APIs, you simply provide the model name as
250
- is, e.g.:
251
-
252
- ```bash
253
- euroeval --model my-model --api-base http://localhost:8000
254
- ```
255
-
256
- Again, if the inference API requires authentication, you simply add the `--api-key`
257
- argument:
258
-
259
- ```bash
260
- euroeval --model my-model --api-base http://localhost:8000 --api-key my-secret-key
261
- ```
262
-
263
- If your model is a reasoning model, then you need to specify this as follows:
264
-
265
- ```bash
266
- euroeval --model my-reasoning-model --api-base http://localhost:8000 --generative-type reasoning
267
- ```
268
-
269
- Likewise, if it is a pretrained decoder model (aka a completion model), then you specify
270
- this as follows:
271
-
272
- ```bash
273
- euroeval --model my-base-decoder-model --api-base http://localhost:8000 --generative-type base
274
- ```
275
-
276
- When using the `Benchmarker` API, the same applies. Here is an example of benchmarking
277
- an Ollama model hosted locally:
278
-
279
- ```python
280
- >>> benchmarker.benchmark(
281
- ... model="ollama_chat/mymodel",
282
- ... api_base="http://localhost:11434",
283
- ... )
284
- ```
285
-
286
- ## Benchmarking in an offline environment
287
-
288
- If you need to benchmark in an offline environment, you need to download the models,
- datasets and metrics beforehand. This can be done by adding the `--download-only`
- argument from the command line, or the `download_only` argument if benchmarking from a
- script. For example, to download the model you want and all of the Danish sentiment
- classification datasets:
293
-
294
- ```bash
295
- euroeval --model <model-id-or-path> --task sentiment-classification --language da --download-only
296
- ```
297
-
298
- Or from a script:
299
-
300
- ```python
301
- >>> benchmarker.benchmark(
302
- ... model="<model-id-or-path>",
303
- ... task="sentiment-classification",
304
- ... language="da",
305
- ... download_only=True,
306
- ... )
307
- ```
308
-
309
- Please note: Offline benchmarking of adapter models is not currently supported, meaning
310
- that we still require an internet connection during the evaluation of these. If offline
311
- support of adapters is important to you, please consider [opening an
312
- issue](https://github.com/EuroEval/EuroEval/issues).
313
-
314
- ## Benchmarking custom datasets
315
-
316
- If you want to benchmark models on your own custom dataset, this is also possible.
- First, you need to set up your dataset to be compatible with EuroEval. This means
- splitting your dataset into training, validation and test splits, and ensuring that
- the column names are correct. We use `text` as the column name for the input text, and
- the output column name depends on the type of task:
321
-
322
- - **Text or multiple-choice classification**: `label`
323
- - **Token classification**: `labels`
324
- - **Reading comprehension**: `answers`
325
- - **Free-form text generation**: `target_text`
326
-
327
- Text and multiple-choice classification tasks are by far the most common. Next, you
- store your three dataset splits as three different CSV files with the desired two
- columns. Finally, you create a `custom_datasets.py` script in which you define the
- associated `DatasetConfig` objects for your dataset. Here is an example of a simple
- text classification dataset with two classes:
332
-
333
- ```python
334
- from euroeval import DatasetConfig, TEXT_CLASSIFICATION
335
- from euroeval.languages import ENGLISH
336
-
337
- MY_CONFIG = DatasetConfig(
338
- name="my-dataset",
339
- pretty_name="My Dataset",
340
- source=dict(train="train.csv", val="val.csv", test="test.csv"),
341
- task=TEXT_CLASSIFICATION,
342
- languages=[ENGLISH],
343
- _labels=["positive", "negative"],
344
- )
345
- ```
346
-
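- For reference, each of the three CSV files in this example would then contain the two
- columns `text` and `label`, along the lines of the following (hypothetical rows):
-
- ```csv
- text,label
- "I absolutely loved this film",positive
- "The service was slow and disappointing",negative
- ```
-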
347
- You can then benchmark your custom dataset by simply running
348
-
349
- ```bash
350
- euroeval --dataset my-dataset --model <model-id-or-path>
351
- ```
352
-
353
- You can also run the benchmark from a Python script, by simply passing your custom
- dataset configuration directly to the `benchmark` method:
355
-
356
- ```python
357
- from euroeval import Benchmarker
358
-
359
- benchmarker = Benchmarker()
360
- benchmarker.benchmark(model="<model-id-or-path>", dataset=MY_CONFIG)
361
- ```
362
-
363
- We have included three convenience tasks to make it easier to set up custom datasets
- (see the sketch after this list for a worked example):
364
-
365
- - `TEXT_CLASSIFICATION`, which is used for text classification tasks. This requires you
366
- to set the `_labels` argument in the `DatasetConfig`, and requires the columns `text`
367
- and `label` to be present in the dataset.
368
- - `MULTIPLE_CHOICE`, which is used for multiple-choice classification tasks. This
369
- also requires you to set the `_labels` argument in the `DatasetConfig`. Note that for
370
- multiple choice tasks, you need to set up your `text` column to also list all the
371
- choices, and all the samples should have the same number of choices. This requires the
372
- columns `text` and `label` to be present in the dataset.
373
- - `TOKEN_CLASSIFICATION`, which is used when classifying individual tokens in a text.
374
-   This also requires you to set the `_labels` argument in the `DatasetConfig`. This
375
- requires the columns `tokens` and `labels` to be present in the dataset, where
376
- `tokens` is a list of tokens/words in the text, and `labels` is a list of the
377
- corresponding labels for each token (so the two lists have the same length).
378
-
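- As a minimal sketch of the `TOKEN_CLASSIFICATION` convenience task (assuming it can be
- imported from `euroeval` in the same way as `TEXT_CLASSIFICATION` above, and using
- hypothetical file and label names), the configuration could look like this:
-
- ```python
- from euroeval import DatasetConfig, TOKEN_CLASSIFICATION
- from euroeval.languages import ENGLISH
-
- # Each CSV split is assumed to contain a `tokens` column and a `labels` column,
- # where both hold lists of equal length (one label per token).
- MY_TOKEN_CONFIG = DatasetConfig(
-     name="my-token-dataset",
-     pretty_name="My Token Dataset",
-     source=dict(train="train.csv", val="val.csv", test="test.csv"),
-     task=TOKEN_CLASSIFICATION,
-     languages=[ENGLISH],
-     _labels=["O", "B-PER", "I-PER"],
- )
- ```
-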
379
- On top of these three convenience tasks, there are of course also the tasks that we use
- in the official benchmark, which you can apply to your own bespoke dataset:
382
-
383
- - `LA`, for linguistic acceptability datasets.
384
- - `NER`, for named entity recognition datasets with the standard BIO tagging scheme.
385
- - `RC`, for reading comprehension datasets in the SQuAD format.
386
- - `SENT`, for sentiment classification datasets.
387
- - `SUMM`, for text summarisation datasets.
388
- - `KNOW`, for multiple-choice knowledge datasets (e.g., MMLU).
389
- - `MCRC`, for multiple-choice reading comprehension datasets (e.g., Belebele).
390
- - `COMMON_SENSE`, for multiple-choice common-sense reasoning datasets (e.g., HellaSwag).
391
-
392
- These can all be imported from the `euroeval.tasks` module.
393
-
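- For instance, a bespoke sentiment dataset could reuse the official `SENT` task like this
- (a minimal sketch with hypothetical file names; depending on the task, you may still
- need to pass `_labels` as in the earlier examples):
-
- ```python
- from euroeval import DatasetConfig
- from euroeval.languages import ENGLISH
- from euroeval.tasks import SENT
-
- # Hypothetical sentiment dataset reusing the official SENT task; the CSV splits are
- # assumed to use the `text` and `label` columns described above.
- MY_SENT_CONFIG = DatasetConfig(
-     name="my-sentiment-dataset",
-     pretty_name="My Sentiment Dataset",
-     source=dict(train="train.csv", val="val.csv", test="test.csv"),
-     task=SENT,
-     languages=[ENGLISH],
- )
- ```
-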
394
- ### Creating your own custom task
395
-
396
- You are of course also free to define your own task from scratch, which allows you to
397
- customise the prompts used when evaluating generative models, for instance. Here is an
398
- example of a custom free-form text generation task, where the goal for the model is to
399
- generate a SQL query based on a natural language input:
400
-
401
- ```python
402
- from euroeval import DatasetConfig
403
- from euroeval.data_models import Task, PromptConfig
404
- from euroeval.enums import TaskGroup, ModelType
405
- from euroeval.languages import ENGLISH
406
- from euroeval.metrics import rouge_l_metric
407
-
408
- sql_generation_task = Task(
409
- name="sql-generation",
410
- task_group=TaskGroup.TEXT_TO_TEXT,
411
- template_dict={
412
- ENGLISH: PromptConfig(
413
- default_prompt_prefix="The following are natural language texts and their "
414
- "corresponding SQL queries.",
415
- default_prompt_template="Natural language query: {text}\nSQL query: "
416
- "{target_text}",
417
- default_instruction_prompt="Generate the SQL query for the following "
418
- "natural language query:\n{text!r}",
419
- default_prompt_label_mapping=dict(),
420
- ),
421
- },
422
- metrics=[rouge_l_metric],
423
- default_num_few_shot_examples=3,
424
- default_max_generated_tokens=256,
425
- default_allowed_model_types=[ModelType.GENERATIVE],
426
- )
427
-
428
- MY_SQL_DATASET = DatasetConfig(
429
- name="my-sql-dataset",
430
- pretty_name="My SQL Dataset",
431
- source=dict(train="train.csv", val="val.csv", test="test.csv"),
432
- task=sql_generation_task,
433
- languages=[ENGLISH],
434
- )
435
- ```
436
-
437
- Again, with this you can benchmark your custom dataset by simply running
438
-
439
- ```bash
440
- euroeval --dataset my-sql-dataset --model <model-id-or-path>
441
- ```
442
-
443
- ## Reproducing the evaluation datasets
444
-
445
- All datasets used in this project are generated using the scripts located in the
446
- [src/scripts](src/scripts) folder. To reproduce a dataset, run the corresponding script
447
- with the following command:
448
-
449
- ```bash
450
- uv run src/scripts/<name-of-script>.py
451
- ```
452
-
453
- Replace `<name-of-script>` with the specific script you wish to execute, e.g.,
454
-
455
- ```bash
456
- uv run src/scripts/create_allocine.py
457
- ```
458
-
459
- ## Contributors :pray:
460
-
461
- A huge thank you to all the contributors who have helped make this project a success!
462
-
463
- <a href="https://github.com/peter-sk">
464
- <img
465
- src="https://avatars.githubusercontent.com/u/6168908"
466
- width=50
467
- alt="Contributor avatar for peter-sk"
468
- />
469
- </a>
470
- <a href="https://github.com/AJDERS">
471
- <img
472
- src="https://avatars.githubusercontent.com/u/38854604"
473
- width=50
474
- alt="Contributor avatar for AJDERS"
475
- />
476
- </a>
477
- <a href="https://github.com/oliverkinch">
478
- <img
479
- src="https://avatars.githubusercontent.com/u/71556498"
480
- width=50
481
- alt="Contributor avatar for oliverkinch"
482
- />
483
- </a>
484
- <a href="https://github.com/versae">
485
- <img
486
- src="https://avatars.githubusercontent.com/u/173537"
487
- width=50
488
- alt="Contributor avatar for versae"
489
- />
490
- </a>
491
- <a href="https://github.com/KennethEnevoldsen">
492
- <img
493
- src="https://avatars.githubusercontent.com/u/23721977"
494
- width=50
495
- alt="Contributor avatar for KennethEnevoldsen"
496
- />
497
- </a>
498
- <a href="https://github.com/viggo-gascou">
499
- <img
500
- src="https://avatars.githubusercontent.com/u/94069687"
501
- width=50
502
- alt="Contributor avatar for viggo-gascou"
503
- />
504
- </a>
505
- <a href="https://github.com/mathiasesn">
506
- <img
507
- src="https://avatars.githubusercontent.com/u/27091759"
508
- width=50
509
- alt="Contributor avatar for mathiasesn"
510
- />
511
- </a>
512
- <a href="https://github.com/Alkarex">
513
- <img
514
- src="https://avatars.githubusercontent.com/u/1008324"
515
- width=50
516
- alt="Contributor avatar for Alkarex"
517
- />
518
- </a>
519
- <a href="https://github.com/marksverdhei">
520
- <img
521
- src="https://avatars.githubusercontent.com/u/46672778"
522
- width=50
523
- alt="Contributor avatar for marksverdhei"
524
- />
525
- </a>
526
- <a href="https://github.com/Mikeriess">
527
- <img
528
- src="https://avatars.githubusercontent.com/u/19728563"
529
- width=50
530
- alt="Contributor avatar for Mikeriess"
531
- />
532
- </a>
533
- <a href="https://github.com/ThomasKluiters">
534
- <img
535
- src="https://avatars.githubusercontent.com/u/8137941"
536
- width=50
537
- alt="Contributor avatar for ThomasKluiters"
538
- />
539
- </a>
540
- <a href="https://github.com/BramVanroy">
541
- <img
542
- src="https://avatars.githubusercontent.com/u/2779410"
543
- width=50
544
- alt="Contributor avatar for BramVanroy"
545
- />
546
- </a>
547
- <a href="https://github.com/peregilk">
548
- <img
549
- src="https://avatars.githubusercontent.com/u/9079808"
550
- width=50
551
- alt="Contributor avatar for peregilk"
552
- />
553
- </a>
554
- <a href="https://github.com/Rijgersberg">
555
- <img
556
- src="https://avatars.githubusercontent.com/u/8604946"
557
- width=50
558
- alt="Contributor avatar for Rijgersberg"
559
- />
560
- </a>
561
- <a href="https://github.com/duarteocarmo">
562
- <img
563
- src="https://avatars.githubusercontent.com/u/26342344"
564
- width=50
565
- alt="Contributor avatar for duarteocarmo"
566
- />
567
- </a>
568
- <a href="https://github.com/slowwavesleep">
569
- <img
570
- src="https://avatars.githubusercontent.com/u/44175589"
571
- width=50
572
- alt="Contributor avatar for slowwavesleep"
573
- />
574
- </a>
575
- <a href="https://github.com/mrkowalski">
576
- <img
577
- src="https://avatars.githubusercontent.com/u/6357044"
578
- width=50
579
- alt="Contributor avatar for mrkowalski"
580
- />
581
- </a>
582
- <a href="https://github.com/simonevanbruggen">
583
- <img
584
- src="https://avatars.githubusercontent.com/u/24842609"
585
- width=50
586
- alt="Contributor avatar for simonevanbruggen"
587
- />
588
- </a>
589
- <a href="https://github.com/tvosch">
590
- <img
591
- src="https://avatars.githubusercontent.com/u/110661769"
592
- width=50
593
- alt="Contributor avatar for tvosch"
594
- />
595
- </a>
596
- <a href="https://github.com/Touzen">
597
- <img
598
- src="https://avatars.githubusercontent.com/u/1416265"
599
- width=50
600
- alt="Contributor avatar for Touzen"
601
- />
602
- </a>
603
-
604
- ### Contribute to EuroEval
605
-
606
- We welcome contributions to EuroEval! Whether you're fixing bugs, adding features, or
607
- contributing new datasets, your help makes this project better for everyone.
608
-
609
- - **General contributions**: Check out our [contribution guidelines](CONTRIBUTING.md)
610
- for information on how to get started.
611
- - **Adding datasets**: If you're interested in adding a new dataset to EuroEval, we have
612
- a [dedicated guide](NEW_DATASET_GUIDE.md) with step-by-step instructions.
613
-
614
- ### Special thanks
615
-
616
- - Thanks to [Google](https://google.com/) for sponsoring Gemini credits as part of their
617
- [Google Cloud for Researchers Program](https://cloud.google.com/edu/researchers).
618
- - Thanks to [@Mikeriess](https://github.com/Mikeriess) for evaluating many of the larger
619
- models on the leaderboards.
620
- - Thanks to [OpenAI](https://openai.com/) for sponsoring OpenAI credits as part of their
621
- [Researcher Access Program](https://openai.com/form/researcher-access-program/).
622
- - Thanks to [UWV](https://www.uwv.nl/) and [KU
623
- Leuven](https://www.arts.kuleuven.be/ling/ccl) for sponsoring the Azure OpenAI
624
- credits used to evaluate GPT-4-turbo in Dutch.
625
- - Thanks to [Miðeind](https://mideind.is/en) for sponsoring the OpenAI
626
- credits used to evaluate GPT-4-turbo in Icelandic and Faroese.
627
- - Thanks to [CHC](https://chc.au.dk/) for sponsoring the OpenAI credits used to
628
- evaluate GPT-4-turbo in German.
629
-
630
- ## Citing EuroEval
631
-
632
- If you want to cite the framework, then feel free to use this:
633
-
634
- ```bibtex
635
- @article{smart2024encoder,
636
- title={Encoder vs Decoder: Comparative Analysis of Encoder and Decoder Language Models on Multilingual NLU Tasks},
637
- author={Smart, Dan Saattrup and Enevoldsen, Kenneth and Schneider-Kamp, Peter},
638
- journal={arXiv preprint arXiv:2406.13469},
639
- year={2024}
640
- }
641
- @inproceedings{smart2023scandeval,
642
- author = {Smart, Dan Saattrup},
643
- booktitle = {Proceedings of the 24th Nordic Conference on Computational Linguistics (NoDaLiDa)},
644
- month = may,
645
- pages = {185--201},
646
- title = {{ScandEval: A Benchmark for Scandinavian Natural Language Processing}},
647
- year = {2023}
648
- }
649
- ```