pearmut 0.2.8__tar.gz → 0.2.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pearmut-0.2.8 → pearmut-0.2.10}/PKG-INFO +50 -4
- {pearmut-0.2.8 → pearmut-0.2.10}/README.md +50 -4
- {pearmut-0.2.8 → pearmut-0.2.10}/pearmut.egg-info/PKG-INFO +50 -4
- {pearmut-0.2.8 → pearmut-0.2.10}/pyproject.toml +1 -1
- {pearmut-0.2.8 → pearmut-0.2.10}/server/app.py +65 -2
- {pearmut-0.2.8 → pearmut-0.2.10}/server/cli.py +3 -1
- pearmut-0.2.10/server/static/dashboard.bundle.js +1 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/server/static/dashboard.html +28 -1
- pearmut-0.2.10/server/static/listwise.bundle.js +1 -0
- pearmut-0.2.10/server/static/pointwise.bundle.js +1 -0
- pearmut-0.2.8/server/static/dashboard.bundle.js +0 -1
- pearmut-0.2.8/server/static/listwise.bundle.js +0 -1
- pearmut-0.2.8/server/static/pointwise.bundle.js +0 -1
- {pearmut-0.2.8 → pearmut-0.2.10}/LICENSE +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/pearmut.egg-info/SOURCES.txt +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/pearmut.egg-info/dependency_links.txt +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/pearmut.egg-info/entry_points.txt +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/pearmut.egg-info/requires.txt +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/pearmut.egg-info/top_level.txt +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/server/assignment.py +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/server/static/assets/favicon.svg +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/server/static/assets/style.css +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/server/static/index.html +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/server/static/listwise.html +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/server/static/pointwise.html +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/server/utils.py +0 -0
- {pearmut-0.2.8 → pearmut-0.2.10}/setup.cfg +0 -0

{pearmut-0.2.8 → pearmut-0.2.10}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pearmut
-Version: 0.2.8
+Version: 0.2.10
 Summary: A tool for evaluation of model outputs, primarily MT.
 Author-email: Vilém Zouhar <vilem.zouhar@gmail.com>
 License: MIT
@@ -47,9 +47,13 @@ Dynamic: license-file
 - [Hosting Assets](#hosting-assets)
 - [Campaign Management](#campaign-management)
 - [CLI Commands](#cli-commands)
+- [Terminology](#terminology)
 - [Development](#development)
 - [Citation](#citation)
 
+
+**Error Span** — A highlighted segment of text marked as containing an error, with optional severity (`minor`, `major`, `neutral`) and MQM category labels.
+
 ## Quick Start
 
 Install and run locally without cloning:
@@ -278,12 +282,54 @@ Management link (shown when adding campaigns or running server) provides:
 - Task progress reset (data preserved)
 - Download progress and annotations
 
-<img width="
+<img width="1000" alt="Management dashboard" src="https://github.com/user-attachments/assets/8953252c-d7b1-428c-a974-5bc7501457c7" />
 
 Completion tokens are shown at annotation end for verification (download correct tokens from dashboard). Incorrect tokens can be shown if quality control fails.
 
 <img width="500" alt="Token on completion" src="https://github.com/user-attachments/assets/40eb904c-f47a-4011-aa63-9a4f1c501549" />
 
+### Model Results Display
+
+Add `&results` to dashboard URL to show model rankings (requires valid token).
+Items need a `model` field (pointwise) or a `models` field (listwise), and `protocol_score` needs to be enabled so that the `score` can be used for the ranking:
+```python
+{"doc_id": "1", "model": "CommandA", "src": "...", "tgt": "..."}
+{"doc_id": "2", "models": ["CommandA", "Claude"], "src": "...", "tgt": ["...", "..."]}
+```
+See an example in [Campaign Management](#campaign-management)
+
+
+## Terminology
+
+- **Campaign**: An annotation project that contains configuration, data, and user assignments. Each campaign has a unique identifier and is defined in a JSON file.
+- **Campaign File**: A JSON file that defines the campaign configuration, including the campaign ID, assignment type, protocol settings, and annotation data.
+- **Campaign ID**: A unique identifier for a campaign (e.g., `"wmt25_#_en-cs_CZ"`). Used to reference and manage specific campaigns.
+- **Task**: A unit of work assigned to a user. In task-based assignment, each task consists of a predefined set of items for a specific user.
+- **Item** — A single annotation unit within a task. For translation evaluation, an item typically represents a document (source text and target translation). Items can contain text, images, audio, or video.
+- **Document** — A collection of one or more segments (sentence pairs or text units) that are evaluated together as a single item.
+- **User** / **Annotator**: A person who performs annotations in a campaign. Each user is identified by a unique user ID and accesses the campaign through a unique URL.
+- **Attention Check** — A validation item with known correct answers used to ensure annotator quality. Can be:
+  - **Loud**: Shows warning message and forces retry on failure
+  - **Silent**: Logs failures without notifying the user (for quality control analysis)
+- **Token** — A completion code shown to users when they finish their annotations. Tokens verify the completion and whether the user passed quality control checks:
+  - **Pass Token** (`token_pass`): Shown when user meets validation thresholds
+  - **Fail Token** (`token_fail`): Shown when user fails to meet validation requirements
+- **Tutorial**: An instructional validation item that teaches users how to annotate. Includes `allow_skip: true` to let users skip if they have seen it before.
+- **Validation**: Quality control rules attached to items that check if annotations match expected criteria (score ranges, error span locations, etc.). Used for tutorials and attention checks.
+- **Model**: The system or model that generated the output being evaluated (e.g., `"GPT-4"`, `"Claude"`). Used for tracking and ranking model performance.
+- **Dashboard**: The management interface that shows campaign progress, annotator statistics, access links, and allows downloading annotations. Accessed via a special management URL with token authentication.
+- **Protocol**: The annotation scheme defining what data is collected:
+  - **Score**: Numeric quality rating (0-100)
+  - **Error Spans**: Text highlights marking errors
+  - **Error Categories**: MQM taxonomy labels for errors
+- **Template**: The annotation interface type:
+  - **Pointwise**: Evaluate one output at a time
+  - **Listwise**: Compare multiple outputs simultaneously
+- **Assignment**: The method for distributing items to users:
+  - **Task-based**: Each user has predefined items
+  - **Single-stream**: Users draw from a shared pool with random assignment
+  - **Dynamic**: Work in progress
+
 ## Development
 
 Server responds to data-only requests from frontend (no template coupling). Frontend served from pre-built `static/` on install.
@@ -295,7 +341,7 @@ cd pearmut
 npm install web/ --prefix web/
 npm run build --prefix web/
 # optionally keep running indefinitely to auto-rebuild
-npm watch
+npm run watch --prefix web/
 
 # Install as editable
 pip3 install -e .
@@ -323,7 +369,7 @@ If you use this work in your paper, please cite as following.
   author={Vilém Zouhar},
   title={Pearmut: Platform for Evaluating and Reviewing of Multilingual Tasks},
   url={https://github.com/zouharvi/pearmut/},
-  year={
+  year={2026},
 }
 ```
 

{pearmut-0.2.8 → pearmut-0.2.10}/README.md
@@ -27,9 +27,13 @@
 - [Hosting Assets](#hosting-assets)
 - [Campaign Management](#campaign-management)
 - [CLI Commands](#cli-commands)
+- [Terminology](#terminology)
 - [Development](#development)
 - [Citation](#citation)
 
+
+**Error Span** — A highlighted segment of text marked as containing an error, with optional severity (`minor`, `major`, `neutral`) and MQM category labels.
+
 ## Quick Start
 
 Install and run locally without cloning:
@@ -258,12 +262,54 @@ Management link (shown when adding campaigns or running server) provides:
 - Task progress reset (data preserved)
 - Download progress and annotations
 
-<img width="
+<img width="1000" alt="Management dashboard" src="https://github.com/user-attachments/assets/8953252c-d7b1-428c-a974-5bc7501457c7" />
 
 Completion tokens are shown at annotation end for verification (download correct tokens from dashboard). Incorrect tokens can be shown if quality control fails.
 
 <img width="500" alt="Token on completion" src="https://github.com/user-attachments/assets/40eb904c-f47a-4011-aa63-9a4f1c501549" />
 
+### Model Results Display
+
+Add `&results` to dashboard URL to show model rankings (requires valid token).
+Items need a `model` field (pointwise) or a `models` field (listwise), and `protocol_score` needs to be enabled so that the `score` can be used for the ranking:
+```python
+{"doc_id": "1", "model": "CommandA", "src": "...", "tgt": "..."}
+{"doc_id": "2", "models": ["CommandA", "Claude"], "src": "...", "tgt": ["...", "..."]}
+```
+See an example in [Campaign Management](#campaign-management)
+
+
+## Terminology
+
+- **Campaign**: An annotation project that contains configuration, data, and user assignments. Each campaign has a unique identifier and is defined in a JSON file.
+- **Campaign File**: A JSON file that defines the campaign configuration, including the campaign ID, assignment type, protocol settings, and annotation data.
+- **Campaign ID**: A unique identifier for a campaign (e.g., `"wmt25_#_en-cs_CZ"`). Used to reference and manage specific campaigns.
+- **Task**: A unit of work assigned to a user. In task-based assignment, each task consists of a predefined set of items for a specific user.
+- **Item** — A single annotation unit within a task. For translation evaluation, an item typically represents a document (source text and target translation). Items can contain text, images, audio, or video.
+- **Document** — A collection of one or more segments (sentence pairs or text units) that are evaluated together as a single item.
+- **User** / **Annotator**: A person who performs annotations in a campaign. Each user is identified by a unique user ID and accesses the campaign through a unique URL.
+- **Attention Check** — A validation item with known correct answers used to ensure annotator quality. Can be:
+  - **Loud**: Shows warning message and forces retry on failure
+  - **Silent**: Logs failures without notifying the user (for quality control analysis)
+- **Token** — A completion code shown to users when they finish their annotations. Tokens verify the completion and whether the user passed quality control checks:
+  - **Pass Token** (`token_pass`): Shown when user meets validation thresholds
+  - **Fail Token** (`token_fail`): Shown when user fails to meet validation requirements
+- **Tutorial**: An instructional validation item that teaches users how to annotate. Includes `allow_skip: true` to let users skip if they have seen it before.
+- **Validation**: Quality control rules attached to items that check if annotations match expected criteria (score ranges, error span locations, etc.). Used for tutorials and attention checks.
+- **Model**: The system or model that generated the output being evaluated (e.g., `"GPT-4"`, `"Claude"`). Used for tracking and ranking model performance.
+- **Dashboard**: The management interface that shows campaign progress, annotator statistics, access links, and allows downloading annotations. Accessed via a special management URL with token authentication.
+- **Protocol**: The annotation scheme defining what data is collected:
+  - **Score**: Numeric quality rating (0-100)
+  - **Error Spans**: Text highlights marking errors
+  - **Error Categories**: MQM taxonomy labels for errors
+- **Template**: The annotation interface type:
+  - **Pointwise**: Evaluate one output at a time
+  - **Listwise**: Compare multiple outputs simultaneously
+- **Assignment**: The method for distributing items to users:
+  - **Task-based**: Each user has predefined items
+  - **Single-stream**: Users draw from a shared pool with random assignment
+  - **Dynamic**: Work in progress
+
 ## Development
 
 Server responds to data-only requests from frontend (no template coupling). Frontend served from pre-built `static/` on install.
@@ -275,7 +321,7 @@ cd pearmut
 npm install web/ --prefix web/
 npm run build --prefix web/
 # optionally keep running indefinitely to auto-rebuild
-npm watch
+npm run watch --prefix web/
 
 # Install as editable
 pip3 install -e .
@@ -303,8 +349,8 @@ If you use this work in your paper, please cite as following.
   author={Vilém Zouhar},
   title={Pearmut: Platform for Evaluating and Reviewing of Multilingual Tasks},
   url={https://github.com/zouharvi/pearmut/},
-  year={
+  year={2026},
 }
 ```
 
-Contributions are welcome! Please reach out to [Vilém Zouhar](mailto:vilem.zouhar@gmail.com).
+Contributions are welcome! Please reach out to [Vilém Zouhar](mailto:vilem.zouhar@gmail.com).

{pearmut-0.2.8 → pearmut-0.2.10}/pearmut.egg-info/PKG-INFO
(Identical to the {pearmut-0.2.8 → pearmut-0.2.10}/PKG-INFO diff above, since the egg-info metadata mirrors the top-level PKG-INFO.)
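
The "Model Results Display" section added to the documentation above is terse, so here is a minimal sketch (not part of the package) of how the results view could be opened against a locally running server; it reuses the `dashboard.html?campaign_id=...&token=...` query format that the CLI prints and appends the `&results` flag described in the README. The server address, campaign ID, and token below are placeholders.

```python
# Hypothetical values for illustration; substitute the campaign ID and
# management token that pearmut prints when the server starts.
import urllib.parse
import webbrowser

server = "http://localhost:8001"          # assumed local server address
campaign_id = "wmt25_#_en-cs_CZ"          # example campaign ID from the README
token = "REPLACE_WITH_MANAGEMENT_TOKEN"   # placeholder

# Same query format the CLI prints for the dashboard, plus `&results`
# to switch the dashboard to the model-ranking view.
url = (
    f"{server}/dashboard.html?"
    f"campaign_id={urllib.parse.quote_plus(campaign_id)}"
    f"&token={token}&results"
)
webbrowser.open(url)
```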

{pearmut-0.2.8 → pearmut-0.2.10}/server/app.py
@@ -1,5 +1,7 @@
+import collections
 import json
 import os
+import statistics
 from typing import Any
 
 from fastapi import FastAPI, Query
@@ -12,6 +14,7 @@ from .assignment import get_i_item, get_next_item, reset_task, update_progress
 from .utils import (
     ROOT,
     check_validation_threshold,
+    get_db_log,
     load_progress_data,
     save_db_payload,
     save_progress_data,
@@ -191,6 +194,58 @@ async def _dashboard_data(request: DashboardDataRequest):
     )
 
 
+class DashboardResultsRequest(BaseModel):
+    campaign_id: str
+    token: str
+
+
+@app.post("/dashboard-results")
+async def _dashboard_results(request: DashboardResultsRequest):
+    campaign_id = request.campaign_id
+    token = request.token
+
+    if campaign_id not in progress_data:
+        return JSONResponse(content="Unknown campaign ID", status_code=400)
+
+    # Check if token is valid
+    if token != tasks_data[campaign_id]["token"]:
+        return JSONResponse(content="Invalid token", status_code=400)
+
+    # Compute model scores from annotations
+    model_scores = collections.defaultdict(dict)
+
+    # Iterate through all tasks to find items with 'model' field
+    log = get_db_log(campaign_id)
+    for entry in log:
+        if "item" not in entry or "annotations" not in entry:
+            continue
+        for item, annotation in zip(entry["item"], entry["annotations"]):
+            if "model" in item:
+                # pointwise
+                if "score" in annotation:
+                    # make sure to only keep the latest score for each item
+                    # json.dumps(item) creates a unique item key
+                    model_scores[item["model"]][json.dumps(item)] = annotation["score"]
+            elif "models" in item:
+                # listwise
+                for model, annotation_cand in zip(item["models"], annotation):
+                    if "score" in annotation_cand:
+                        model_scores[model][json.dumps(item)] = (
+                            annotation_cand["score"]
+                        )
+
+    results = [
+        {
+            "model": model,
+            "score": statistics.mean(scores.values()),
+            "count": len(scores),
+        }
+        for model, scores in model_scores.items()
+    ]
+    results.sort(key=lambda x: x["score"], reverse=True)
+    return JSONResponse(content=results, status_code=200)
+
+
 class ResetTaskRequest(BaseModel):
     campaign_id: str
     user_id: str
@@ -236,7 +291,11 @@ async def _download_annotations(
     with open(output_path, "r") as f:
         output[campaign_id] = [json.loads(x) for x in f.readlines()]
 
-    return JSONResponse(
+    return JSONResponse(
+        content=output,
+        status_code=200,
+        headers={"Content-Disposition": 'inline; filename="annotations.json"'}
+    )
 
 
 @app.get("/download-progress")
@@ -260,7 +319,11 @@ async def _download_progress(
 
     output[cid] = progress_data[cid]
 
-    return JSONResponse(
+    return JSONResponse(
+        content=output,
+        status_code=200,
+        headers={"Content-Disposition": 'inline; filename="progress.json"'}
+    )
 
 
 static_dir = f"{os.path.dirname(os.path.abspath(__file__))}/static/"
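
The new `/dashboard-results` endpoint above accepts a JSON body with `campaign_id` and `token` and returns a list of `{"model", "score", "count"}` entries sorted by mean score. A minimal client sketch, assuming a locally running server and placeholder credentials (`requests` is used only for the illustration):

```python
import requests

resp = requests.post(
    "http://localhost:8001/dashboard-results",   # assumed server address
    json={
        "campaign_id": "wmt25_#_en-cs_CZ",       # example campaign ID
        "token": "REPLACE_WITH_MANAGEMENT_TOKEN",
    },
)
resp.raise_for_status()

# Entries come back sorted by mean score, highest first.
for entry in resp.json():
    print(f"{entry['model']}: {entry['score']:.1f} over {entry['count']} items")
```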

{pearmut-0.2.8 → pearmut-0.2.10}/server/cli.py
@@ -41,7 +41,9 @@ def _run(args_unknown):
         args.server + "/dashboard.html?" + "&".join([
             f"campaign_id={urllib.parse.quote_plus(campaign_id)}&token={campaign_data["token"]}"
             for campaign_id, campaign_data in tasks_data.items()
-        ])
+        ]),
+        # this is important to flush
+        flush=True,
     )
 
     uvicorn.run(
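
The `flush=True` added above matters because `print` output is block-buffered when stdout is piped or captured, and the subsequent `uvicorn.run(...)` keeps the process alive, so without an explicit flush the dashboard links could stay stuck in the buffer. A tiny standalone illustration (not from the pearmut codebase):

```python
import time

# When stdout is piped (e.g. `python demo.py | tee log.txt`), a plain
# print("...") can sit in Python's block buffer while the long-running
# server below keeps the process alive, so the link never appears.
# flush=True pushes the line out immediately.
print("dashboard: http://localhost:8001/dashboard.html?campaign_id=...", flush=True)

time.sleep(3600)  # stand-in for uvicorn.run(...), which blocks indefinitely
```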