pearmut 0.1.1__tar.gz → 0.1.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pearmut-0.1.1 → pearmut-0.1.2}/PKG-INFO +9 -9
- {pearmut-0.1.1 → pearmut-0.1.2}/README.md +8 -8
- {pearmut-0.1.1 → pearmut-0.1.2}/pearmut.egg-info/PKG-INFO +9 -9
- {pearmut-0.1.1 → pearmut-0.1.2}/pearmut.egg-info/SOURCES.txt +3 -1
- {pearmut-0.1.1 → pearmut-0.1.2}/pyproject.toml +1 -1
- {pearmut-0.1.1 → pearmut-0.1.2}/server/app.py +25 -21
- pearmut-0.1.2/server/assignment.py +184 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/server/cli.py +37 -19
- pearmut-0.1.2/server/static/assets/style.css +228 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/server/static/dashboard.bundle.js +1 -1
- pearmut-0.1.2/server/static/listwise.bundle.js +1 -0
- pearmut-0.1.2/server/static/listwise.html +77 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/server/static/pointwise.bundle.js +1 -1
- {pearmut-0.1.1 → pearmut-0.1.2}/server/static/pointwise.html +1 -167
- pearmut-0.1.1/server/protocols.py +0 -122
- pearmut-0.1.1/server/static/assets/style.css +0 -60
- {pearmut-0.1.1 → pearmut-0.1.2}/LICENSE +0 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/pearmut.egg-info/dependency_links.txt +0 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/pearmut.egg-info/entry_points.txt +0 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/pearmut.egg-info/requires.txt +0 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/pearmut.egg-info/top_level.txt +0 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/server/static/assets/favicon.svg +0 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/server/static/dashboard.html +0 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/server/static/index.html +0 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/server/utils.py +0 -0
- {pearmut-0.1.1 → pearmut-0.1.2}/setup.cfg +0 -0
{pearmut-0.1.1 → pearmut-0.1.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pearmut
-Version: 0.1.1
+Version: 0.1.2
 Summary: A tool for evaluation of model outputs, primarily MT.
 Author-email: Vilém Zouhar <vilem.zouhar@gmail.com>
 License: apache-2.0
@@ -63,7 +63,7 @@ One of the simplest ones, where each user has a pre-defined list of tasks (`task
 ```python
 {
     "info": {
-        "[…]
+        "assignment": "task-based",
         "template": "pointwise",
         "protocol_score": true, # we want scores [0...100] for each segment
         "protocol_error_spans": true, # we want error spans
@@ -115,19 +115,19 @@ For the standard ones (ESA, DA, MQM), we expect each item to be a dictionary (co
     ... # definition of another item (document)
 ```
 
-We also support a […]
+We also support a simple allocation where all annotators draw from the same pool (`single-stream`). Items are randomly assigned to annotators from the pool of unfinished items:
 ```python
 {
     "campaign_id": "my campaign 6",
     "info": {
-        "[…]
+        "assignment": "single-stream",
         "template": "pointwise",
         "protocol_score": True, # collect scores
         "protocol_error_spans": True, # collect error spans
         "protocol_error_categories": False, # do not collect MQM categories, so ESA
-        "[…]
+        "num_users": 50, # number of annotators
     },
-    "data": [...], # list of all items
+    "data": [...], # list of all items (shared among all annotators)
 }
 ```
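The `single-stream` pool described in the hunk above can be pictured with a short standalone sketch (hypothetical helper names, not pearmut's API): every annotator draws uniformly at random from whatever is still unfinished, so the pool drains collectively rather than per user.

```python
import random

# Shared progress flags: one per item, common to all annotators.
pool_progress = [False] * 6  # assume a pool of 6 items

def draw_item(progress: list[bool]) -> int | None:
    """Pick a random unfinished item index; None once everything is done."""
    remaining = [i for i, done in enumerate(progress) if not done]
    return random.choice(remaining) if remaining else None

def complete_item(progress: list[bool], item_i: int) -> None:
    """Completing an item marks it done for every annotator at once."""
    progress[item_i] = True

# Any annotator's request draws from the same shrinking pool.
while (item_i := draw_item(pool_progress)) is not None:
    complete_item(pool_progress, item_i)
print("pool exhausted:", pool_progress)
```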
@@ -137,10 +137,10 @@ We also support dynamic allocation of annotations (`dynamic`, not yet ⚠️), w
 {
     "campaign_id": "my campaign 6",
     "info": {
-        "[…]
+        "assignment": "dynamic",
         "template": "kway",
         "protocol_k": 5,
-        "[…]
+        "num_users": 50,
     },
     "data": [...], # list of all items
 }
@@ -185,7 +185,7 @@ To make changes locally, clone the repository and run the following, which will
 cd pearmut
 # watch the frontend for changes (in a separate terminal)
 npm install web/ --prefix web/
-npm run […]
+npm run build --prefix web/ # `watch` for rebuild on code change
 
 # install local package as editable
 pip3 install -e .
{pearmut-0.1.1 → pearmut-0.1.2}/README.md

(Same content changes as in PKG-INFO above, at @@ -42,7 +42,7, @@ -94,19 +94,19, @@ -116,10 +116,10, and @@ -164,7 +164,7.)
{pearmut-0.1.1 → pearmut-0.1.2}/pearmut.egg-info/PKG-INFO

(Identical to the PKG-INFO diff above, including the version bump and all four README hunks.)
{pearmut-0.1.1 → pearmut-0.1.2}/pearmut.egg-info/SOURCES.txt

@@ -8,12 +8,14 @@ pearmut.egg-info/entry_points.txt
 pearmut.egg-info/requires.txt
 pearmut.egg-info/top_level.txt
 server/app.py
+server/assignment.py
 server/cli.py
-server/protocols.py
 server/utils.py
 server/static/dashboard.bundle.js
 server/static/dashboard.html
 server/static/index.html
+server/static/listwise.bundle.js
+server/static/listwise.html
 server/static/pointwise.bundle.js
 server/static/pointwise.html
 server/static/assets/favicon.svg
{pearmut-0.1.1 → pearmut-0.1.2}/server/app.py

@@ -8,7 +8,7 @@ from fastapi.responses import JSONResponse
 from fastapi.staticfiles import StaticFiles
 from pydantic import BaseModel
 
-from .[…]
+from .assignment import get_next_item, reset_task, update_progress
 from .utils import ROOT, load_progress_data, save_progress_data
 
 os.makedirs(f"{ROOT}/data/outputs", exist_ok=True)
@@ -111,19 +111,20 @@ async def _dashboard_data(request: DashboardDataRequest):
     if campaign_id not in progress_data:
         return JSONResponse(content={"error": "Unknown campaign ID"}, status_code=400)
 
-    progress_new = {
-[… 12 more removed lines, truncated in the source diff …]
+    progress_new = {}
+    assignment = tasks_data[campaign_id]["info"]["assignment"]
+    if assignment not in ["task-based", "single-stream"]:
+        return JSONResponse(content={"error": "Unsupported campaign assignment type"}, status_code=400)
+
+    for user_id, user_val in progress_data[campaign_id].items():
+        # shallow copy
+        entry = dict(user_val)
+
+        if not is_privileged:
+            entry["token_correct"] = None
+            entry["token_incorrect"] = None
+
+        progress_new[user_id] = entry
 
     return JSONResponse(
         content={
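A side note on the hunk above: the dashboard copies each user's progress entry and blanks the completion tokens for non-privileged viewers. A shallow `dict()` copy suffices because only top-level keys are overwritten; a minimal sketch with toy data (not pearmut's real schema):

```python
def mask_entry(user_val: dict, is_privileged: bool) -> dict:
    entry = dict(user_val)  # shallow copy: we only overwrite top-level keys
    if not is_privileged:
        entry["token_correct"] = None    # completion tokens stay hidden
        entry["token_incorrect"] = None  # from non-privileged viewers
    return entry

row = {"progress": [True, False], "token_correct": "abc", "token_incorrect": "xyz"}
masked = mask_entry(row, is_privileged=False)
assert masked["token_correct"] is None
assert row["token_correct"] == "abc"  # the stored entry is left untouched
```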
@@ -190,19 +191,22 @@ async def _download_progress(
         return JSONResponse(content={"error": "Mismatched campaign_id and token count"}, status_code=400)
 
     output = {}
-    for […]
-        if […]
-            return JSONResponse(content={"error": f"Unknown campaign ID {[…]
-        if token[[…]
-            return JSONResponse(content={"error": f"Invalid token for campaign ID {[…]
+    for i, cid in enumerate(campaign_id):
+        if cid not in progress_data:
+            return JSONResponse(content={"error": f"Unknown campaign ID {cid}"}, status_code=400)
+        if token[i] != tasks_data[cid]["token"]:
+            return JSONResponse(content={"error": f"Invalid token for campaign ID {cid}"}, status_code=400)
 
-        output[[…]
+        output[cid] = progress_data[cid]
 
     return JSONResponse(content=output, status_code=200)
 
+static_dir = f"{os.path.dirname(os.path.abspath(__file__))}/static/"
+if not os.path.exists(static_dir + "index.html"):
+    raise FileNotFoundError("Static directory not found. Please build the frontend first.")
 
 app.mount(
     "/",
-    StaticFiles(directory=[…]
+    StaticFiles(directory=static_dir, html=True, follow_symlink=True),
     name="static",
 )
pearmut-0.1.2/server/assignment.py (new file)

@@ -0,0 +1,184 @@
+import random
+from typing import Any
+
+from fastapi.responses import JSONResponse
+
+
+def _completed_response(
+    progress_data: dict,
+    campaign_id: str,
+    user_id: str,
+) -> JSONResponse:
+    """Build a completed response with progress, time, and token."""
+    user_progress = progress_data[campaign_id][user_id]
+    # TODO: add check for data quality
+    is_ok = True
+    return JSONResponse(
+        content={
+            "status": "completed",
+            "progress": user_progress["progress"],
+            "time": user_progress["time"],
+            "token": user_progress["token_correct" if is_ok else "token_incorrect"],
+        },
+        status_code=200
+    )
+
+
+def get_next_item(
+    campaign_id: str,
+    user_id: str,
+    tasks_data: dict,
+    progress_data: dict,
+) -> JSONResponse:
+    """
+    Get the next item for the user in the specified campaign.
+    """
+    assignment = tasks_data[campaign_id]["info"]["assignment"]
+    if assignment == "task-based":
+        return get_next_item_taskbased(campaign_id, user_id, tasks_data, progress_data)
+    elif assignment == "single-stream":
+        return get_next_item_single_stream(campaign_id, user_id, tasks_data, progress_data)
+    elif assignment == "dynamic":
+        return get_next_item_dynamic(campaign_id, user_id, tasks_data, progress_data)
+    else:
+        return JSONResponse(content={"error": "Unknown campaign assignment type"}, status_code=400)
+
+
+def get_next_item_taskbased(
+    campaign_id: str,
+    user_id: str,
+    data_all: dict,
+    progress_data: dict,
+) -> JSONResponse:
+    """
+    Get the next item for task-based protocol.
+    """
+    user_progress = progress_data[campaign_id][user_id]
+    if all(user_progress["progress"]):
+        return _completed_response(progress_data, campaign_id, user_id)
+
+    # find first incomplete item
+    item_i = min([i for i, v in enumerate(user_progress["progress"]) if not v])
+    return JSONResponse(
+        content={
+            "status": "ok",
+            "progress": user_progress["progress"],
+            "time": user_progress["time"],
+            "info": {
+                "item_i": item_i,
+            } | {
+                k: v
+                for k, v in data_all[campaign_id]["info"].items()
+                if k.startswith("protocol")
+            },
+            "payload": data_all[campaign_id]["data"][user_id][item_i]},
+        status_code=200
+    )
+
+
+def get_next_item_dynamic(campaign_data: dict, user_id: str, progress_data: dict, data_all: dict):
+    raise NotImplementedError("Dynamic protocol is not implemented yet.")
+
+
+def get_next_item_single_stream(
+    campaign_id: str,
+    user_id: str,
+    data_all: dict,
+    progress_data: dict,
+) -> JSONResponse:
+    """
+    Get the next item for single-stream protocol.
+    In this mode, all users share the same pool of items.
+    Items are randomly selected from unfinished items.
+
+    Note: There is a potential race condition where multiple users could
+    receive the same item simultaneously. This is fine since we store all responses.
+    """
+    user_progress = progress_data[campaign_id][user_id]
+    progress = user_progress["progress"]
+
+    if all(progress):
+        return _completed_response(progress_data, campaign_id, user_id)
+
+    # find a random incomplete item
+    incomplete_indices = [i for i, v in enumerate(progress) if not v]
+    item_i = random.choice(incomplete_indices)
+
+    return JSONResponse(
+        content={
+            "status": "ok",
+            "time": user_progress["time"],
+            "progress": progress,
+            "info": {
+                "item_i": item_i,
+            } | {
+                k: v
+                for k, v in data_all[campaign_id]["info"].items()
+                if k.startswith("protocol")
+            },
+            "payload": data_all[campaign_id]["data"][item_i]},
+        status_code=200
+    )
+
+
+def _reset_user_time(progress_data: dict, campaign_id: str, user_id: str) -> None:
+    """Reset time tracking fields for a user."""
+    progress_data[campaign_id][user_id]["time"] = 0.0
+    progress_data[campaign_id][user_id]["time_start"] = None
+    progress_data[campaign_id][user_id]["time_end"] = None
+
+
+def reset_task(
+    campaign_id: str,
+    user_id: str,
+    tasks_data: dict,
+    progress_data: dict,
+) -> JSONResponse:
+    """
+    Reset the task progress for the user in the specified campaign.
+    """
+    assignment = tasks_data[campaign_id]["info"]["assignment"]
+    if assignment == "task-based":
+        progress_data[campaign_id][user_id]["progress"] = (
+            [False]*len(tasks_data[campaign_id]["data"][user_id])
+        )
+        _reset_user_time(progress_data, campaign_id, user_id)
+        return JSONResponse(content={"status": "ok"}, status_code=200)
+    elif assignment == "single-stream":
+        # for single-stream reset all progress
+        for uid in progress_data[campaign_id]:
+            progress_data[campaign_id][uid]["progress"] = (
+                [False]*len(tasks_data[campaign_id]["data"])
+            )
+        _reset_user_time(progress_data, campaign_id, user_id)
+        return JSONResponse(content={"status": "ok"}, status_code=200)
+    else:
+        return JSONResponse(content={"status": "error", "message": "Reset not supported for this assignment type"}, status_code=400)
+
+
+def update_progress(
+    campaign_id: str,
+    user_id: str,
+    tasks_data: dict,
+    progress_data: dict,
+    item_i: int,
+    payload: Any,
+) -> JSONResponse:
+    """
+    Log the user's response for the specified item in the campaign.
+    """
+    assignment = tasks_data[campaign_id]["info"]["assignment"]
+    if assignment == "task-based":
+        # even if it's already set it should be fine
+        progress_data[campaign_id][user_id]["progress"][item_i] = True
+        # TODO: log attention checks/quality?
+        return JSONResponse(content={"status": "ok"}, status_code=200)
+    elif assignment == "single-stream":
+        # progress all users
+        for uid in progress_data[campaign_id]:
+            progress_data[campaign_id][uid]["progress"][item_i] = True
+        return JSONResponse(content={"status": "ok"}, status_code=200)
+    elif assignment == "dynamic":
+        return JSONResponse(content={"status": "error", "message": "Dynamic protocol logging not implemented yet."}, status_code=400)
+    else:
+        return JSONResponse(content={"status": "error", "message": "Unknown campaign assignment type"}, status_code=400)
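A plausible way to exercise the new module end to end, with toy data shaped after the fields the code reads (the import path assumes the `server` package layout from SOURCES.txt; values are illustrative only):

```python
from server.assignment import get_next_item, update_progress

tasks_data = {
    "demo": {
        "info": {"assignment": "single-stream", "protocol_score": True},
        "data": ["item A", "item B"],  # one shared pool
    }
}
progress_data = {
    "demo": {"quick-fox": {"progress": [False, False], "time": 0.0}}
}

# Returns a JSONResponse whose body carries status, progress, info.item_i, payload.
resp = get_next_item("demo", "quick-fox", tasks_data, progress_data)
print(resp.body)

# Logging a response marks the item done for *every* annotator in the pool.
update_progress("demo", "quick-fox", tasks_data, progress_data,
                item_i=0, payload={"score": 80})
```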
{pearmut-0.1.1 → pearmut-0.1.2}/server/cli.py

@@ -90,33 +90,41 @@ def _add_campaign(args_unknown):
         raise ValueError("Campaign data must contain 'info' field.")
     if "data" not in campaign_data:
         raise ValueError("Campaign data must contain 'data' field.")
-    if "[…]
-        raise ValueError("Campaign 'info' must contain '[…]
+    if "assignment" not in campaign_data["info"]:
+        raise ValueError("Campaign 'info' must contain 'assignment' field.")
     if "template" not in campaign_data["info"]:
         raise ValueError("Campaign 'info' must contain 'template' field.")
 
+    assignment = campaign_data["info"]["assignment"]
     # use random words for identifying users
     rng = random.Random(campaign_data["campaign_id"])
     rword = wonderwords.RandomWord(rng=rng)
-    if […]
+    if assignment == "task-based":
         tasks = campaign_data["data"]
         if not isinstance(tasks, list):
-            raise ValueError([…]
+            raise ValueError(
+                "Task-based campaign 'data' must be a list of tasks.")
         if not all(isinstance(task, list) for task in tasks):
-            raise ValueError(
-[… 4 more removed lines, truncated in the source diff …]
+            raise ValueError(
+                "Each task in task-based campaign 'data' must be a list of items.")
+        num_users = len(tasks)
+    elif assignment == "single-stream":
+        tasks = campaign_data["data"]
+        if "num_users" not in campaign_data["info"]:
+            raise ValueError(
+                "Single-stream campaigns must specify 'num_users' in info.")
         if not isinstance(campaign_data["data"], list):
-            raise ValueError(
-[… 1 more removed line, truncated in the source diff …]
+            raise ValueError(
+                "Single-stream campaign 'data' must be a list of items.")
+        num_users = campaign_data["info"]["num_users"]
+    elif assignment == "dynamic":
+        raise NotImplementedError(
+            "Dynamic campaign assignment is not yet implemented.")
     else:
-        raise ValueError(
-            f"Unknown campaign type: {campaign_data["info"]['type']}")
+        raise ValueError(f"Unknown campaign assignment type: {assignment}")
 
     user_ids = []
-    while len(user_ids) < […]
+    while len(user_ids) < num_users:
         # generate random user IDs
         new_id = f"{rword.random_words(amount=1, include_parts_of_speech=['adjective'])[0]}-{rword.random_words(amount=1, include_parts_of_speech=['noun'])[0]}"
         if new_id not in user_ids:
@@ -126,10 +134,15 @@ def _add_campaign(args_unknown):
         for user_id in user_ids
     ]
 
-[… 4 removed lines, truncated in the source diff …]
+    # For task-based, data is a dict mapping user_id -> tasks
+    # For single-stream, data is a flat list (shared among all users)
+    if assignment == "task-based":
+        campaign_data["data"] = {
+            user_id: task
+            for user_id, task in zip(user_ids, tasks)
+        }
+    elif assignment == "single-stream":
+        campaign_data["data"] = tasks
 
     # generate a token for dashboard access if not present
     if "token" not in campaign_data:
@@ -139,7 +152,12 @@ def _add_campaign(args_unknown):
 
     user_progress = {
         user_id: {
-[… 1 removed line, truncated in the source diff …]
+            # TODO: progress tracking could be based on the assignment type
+            "progress": (
+                [False]*len(campaign_data["data"][user_id]) if assignment == "task-based"
+                else [False]*len(campaign_data["data"]) if assignment == "single-stream"
+                else []
+            ),
             "time_start": None,
             "time_end": None,
             "time": 0,