pearmut 1.0.2__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pearmut/static/dashboard.html CHANGED
@@ -98,4 +98,4 @@
     .white-box {
         overflow-x: auto;
     }
-}</style><script defer="defer" src="dashboard.bundle.js?c0a4cdfa9d1ee5f2e887"></script><link href="style.css?c0a4cdfa9d1ee5f2e887" rel="stylesheet"></head><body style="padding-top: 50px; padding-bottom: 50px;"><div id="main_div" style="width: calc(100% - 200px); min-width: 1300px; margin-left: auto; margin-right: auto;"><div style="margin-bottom: 15px; font-size: 0.9em; color: #555;">💤not started &nbsp;&nbsp; ✍️in progress &nbsp;&nbsp; ✅completed & passed &nbsp;&nbsp;❌completed & failed &nbsp;&nbsp; 🔗annotator link &nbsp;&nbsp; 👁️anotator link (view-only) &nbsp;&nbsp; 🗑️reset progress/campaign &nbsp;&nbsp; ⚖️show model ranking</div><div id="dashboard_div"></div><br><a class="abutton" id="download_progress" style="margin-right: 20px;">Download progress metadata 💾</a> <a class="abutton" id="download_annotations" style="margin-right: 20px;">Download all annotations 💾</a> <button class="abutton" id="add_campaign" style="width: 180px; margin-right: 20px;">Add new campaign 📤</button> <input type="file" id="campaign_file_input" accept=".json" style="display: none;"></div></body></html>
+}</style><script defer="defer" src="dashboard.bundle.js?ee803f203a3b5ee1305d"></script><link href="style.css?ee803f203a3b5ee1305d" rel="stylesheet"></head><body style="padding-top: 50px; padding-bottom: 50px;"><div id="main_div" style="width: calc(100% - 200px); min-width: 1300px; margin-left: auto; margin-right: auto;"><div style="margin-bottom: 15px; font-size: 0.9em; color: #555;">💤not started &nbsp;&nbsp; ✍️in progress &nbsp;&nbsp; ✅completed & passed &nbsp;&nbsp;❌completed & failed &nbsp;&nbsp; 🔗annotator link &nbsp;&nbsp; 👁️anotator link (view-only) &nbsp;&nbsp; 🗑️reset progress/campaign &nbsp;&nbsp; ⚖️show model ranking</div><div id="dashboard_div"></div><br><a class="abutton" id="download_progress" style="margin-right: 20px;">Download progress metadata 💾</a> <a class="abutton" id="download_annotations" style="margin-right: 20px;">Download all annotations 💾</a> <button class="abutton" id="add_campaign" style="width: 180px; margin-right: 20px;">Add new campaign 📤</button> <input type="file" id="campaign_file_input" accept=".json" style="display: none;"></div></body></html>
pearmut/static/index.html CHANGED
@@ -1 +1 @@
-<!doctype html><html lang="en" style="height: 100%;"><head><meta charset="UTF-8"><meta name="viewport" content="width=900px"><title>Pearmut Evaluation</title><link rel="icon" type="image/svg+xml" href="favicon.svg"><script defer="defer" src="index.bundle.js?c0a4cdfa9d1ee5f2e887"></script><link href="style.css?c0a4cdfa9d1ee5f2e887" rel="stylesheet"></head><body><div class="white-box" style="width: max-content; font-size: large; position: absolute; top: 50%; left: 50%; transform: translate(-50%, -50%);">You have reached the Pearmut🍐 evaluation interface.<ul><li>If you are an annotator, you should have received a specialized link that takes you to the annotations.</li><li>If you are annotation manager, then you should distribute these links.</li></ul><br><br>See the <a href="https://github.com/zouharvi/pearmut">Pearmut project on GitHub</a>. Made with 💚 by Vilém Zouhar and others in 2025-2026.</div></body></html>
+<!doctype html><html lang="en" style="height: 100%;"><head><meta charset="UTF-8"><meta name="viewport" content="width=900px"><title>Pearmut Evaluation</title><link rel="icon" type="image/svg+xml" href="favicon.svg"><script defer="defer" src="index.bundle.js?ee803f203a3b5ee1305d"></script><link href="style.css?ee803f203a3b5ee1305d" rel="stylesheet"></head><body><div class="white-box" style="width: max-content; font-size: large; position: absolute; top: 50%; left: 50%; transform: translate(-50%, -50%);">You have reached the Pearmut🍐 evaluation interface.<ul><li>If you are an annotator, you should have received a specialized link that takes you to the annotations.</li><li>If you are annotation manager, then you should distribute these links.</li></ul><br><br>See the <a href="https://github.com/zouharvi/pearmut">Pearmut project on GitHub</a>. Made with 💚 by Vilém Zouhar and others in 2025-2026.</div></body></html>
pearmut/static/style.css CHANGED
@@ -3,6 +3,12 @@ body {
     padding: 0;
     background: linear-gradient(135deg, #b9e2a1 0%, #e7e2cf 100%);
     background-attachment: fixed;
+
+    /* never rescale for phone */
+    text-size-adjust: none;
+    -webkit-text-size-adjust: none;
+    -ms-text-size-adjust: none;
+    -moz-text-size-adjust: none;
 }
 
 * {
@@ -244,3 +250,35 @@ input[type="button"].error_delete:hover {
 .char_missing {
     font-family: monospace;
 }
+
+/* Form styling */
+.form-container {
+    max-width: 600px;
+    margin: 20px auto;
+    padding: 20px;
+}
+
+.form-field {
+    margin-bottom: 20px;
+}
+
+.form-label {
+    font-size: 14pt;
+    margin-bottom: 8px;
+    color: #333;
+}
+
+.form-input {
+    width: 100%;
+    padding: 10px;
+    font-size: 12pt;
+    border: 1px solid #ccc;
+    border-radius: 4px;
+    box-sizing: border-box;
+}
+
+.form-input:focus {
+    outline: none;
+    border-color: #4CAF50;
+    box-shadow: 0 0 5px rgba(76, 175, 80, 0.3);
+}
pearmut/utils.py CHANGED
@@ -9,6 +9,7 @@ ROOT = "."
 RESET_MARKER = "__RESET__"
 TOKEN_MAIN = hashlib.sha256(random.randbytes(16)).hexdigest()[:10]
 
+
 def load_progress_data(warn: str | None = None):
     if not os.path.exists(f"{ROOT}/data/progress.json"):
         if warn is not None:
@@ -17,7 +18,7 @@ def load_progress_data(warn: str | None = None):
             f.write(json.dumps({}))
     with open(f"{ROOT}/data/progress.json", "r") as f:
         data = json.load(f)
-
+
     return data
 
 
@@ -38,42 +39,43 @@ def get_db_log(campaign_id: str) -> list[dict]:
     log_path = f"{ROOT}/data/outputs/{campaign_id}.jsonl"
     if os.path.exists(log_path):
         with open(log_path, "r") as f:
-            _logs[campaign_id] = [
-                json.loads(line) for line in f.readlines()
-            ]
+            _logs[campaign_id] = [json.loads(line) for line in f.readlines()]
     else:
         _logs[campaign_id] = []
 
     return _logs[campaign_id]
 
 
-def get_db_log_item(campaign_id: str, user_id: str | None, item_i: int | None) -> list[dict]:
+def get_db_log_item(
+    campaign_id: str, user_id: str | None, item_i: int | str | None
+) -> list[dict]:
     """
     Returns the log item for the given campaign_id, user_id and item_i.
     Can be empty. Respects reset markers - if a reset marker is found,
     only entries after the last reset are returned.
     """
     log = get_db_log(campaign_id)
-
+
     # Filter matching entries
     matching = [
-        entry for entry in log
+        entry
+        for entry in log
         if (
-            (user_id is None or entry.get("user_id") == user_id) and
-            (item_i is None or entry.get("item_i") == item_i)
+            (user_id is None or entry.get("user_id") == user_id)
+            and (item_i is None or entry.get("item_i") == item_i)
         )
     ]
-
+
     # Find the last reset marker for this user (if any)
     last_reset_idx = -1
     for i, entry in enumerate(matching):
         if entry.get("annotation") == RESET_MARKER:
             last_reset_idx = i
-
+
     # Return only entries after the last reset
     if last_reset_idx >= 0:
-        matching = matching[last_reset_idx + 1:]
-
+        matching = matching[last_reset_idx + 1 :]
+
     return matching
 
 
@@ -89,7 +91,13 @@ def save_db_payload(campaign_id: str, payload: dict):
     log_path = f"{ROOT}/data/outputs/{campaign_id}.jsonl"
     os.makedirs(os.path.dirname(log_path), exist_ok=True)
     with open(log_path, "a") as log_file:
-        log_file.write(json.dumps(payload, ensure_ascii=False,) + "\n")
+        log_file.write(
+            json.dumps(
+                payload,
+                ensure_ascii=False,
+            )
+            + "\n"
+        )
 
     log.append(payload)
 
@@ -102,20 +110,20 @@ def check_validation_threshold(
 ) -> bool:
     """
     Check if user passes the validation threshold.
-
+
     The threshold is defined in campaign info as 'validation_threshold':
     - If integer: pass if number of failed checks <= threshold
-    - If float in [0, 1): pass if proportion of failed checks <= threshold
+    - If float in [0, 1): pass if proportion of failed checks <= threshold
     - If float >= 1: always fail
     - If None/not set: defaults to 0 (fail on any failed check)
-
+
     Returns True if validation passes, False otherwise.
     """
     threshold = tasks_data[campaign_id]["info"].get("validation_threshold", 0)
-
+
     user_progress = progress_data[campaign_id][user_id]
     validations = user_progress.get("validations", {})
-
+
     # Count failed checks (validations is dict of item_i -> list of bools)
     total_checks = 0
     failed_checks = 0
@@ -128,11 +136,11 @@ def check_validation_threshold(
     # If no validation checks exist, pass
     if total_checks == 0:
         return True
-
+
     # Float >= 1: always fail
     if isinstance(threshold, float) and threshold >= 1:
         return False
-
+
     # Check threshold based on type
     if isinstance(threshold, float):
         # Float in [0, 1): proportion-based, pass if failed proportion <= threshold
@@ -140,3 +148,12 @@ def check_validation_threshold(
     else:
         # Integer: count-based, pass if failed count <= threshold
         return failed_checks <= threshold
+
+
+def is_form_document(items):
+    """Check if a document contains form items instead of evaluation items."""
+    if not items:
+        return False
+    # Check if first item has 'text' and 'form' keys (form item)
+    first_item = items[0]
+    return "text" in first_item and "form" in first_item
pearmut-1.0.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pearmut
-Version: 1.0.2
+Version: 1.0.3
 Summary: A tool for evaluation of model outputs, primarily MT.
 Author-email: Vilém Zouhar <vilem.zouhar@gmail.com>
 License: MIT
@@ -35,12 +35,15 @@ Dynamic: license-file
 - [Assignment Types](#assignment-types)
 - [Advanced Features](#advanced-features)
   - [Pre-filled Error Spans (ESA<sup>AI</sup>)](#pre-filled-error-spans-esaai)
+  - [Custom MQM Taxonomy](#custom-mqm-taxonomy)
   - [Tutorial and Attention Checks](#tutorial-and-attention-checks)
+  - [Form Items for User Metadata](#form-items-for-user-metadata)
   - [Pre-defined User IDs and Tokens](#pre-defined-user-ids-and-tokens)
   - [Multimodal Annotations](#multimodal-annotations)
     - [Hosting Assets](#hosting-assets)
 - [Campaign Management](#campaign-management)
   - [Custom Completion Messages](#custom-completion-messages)
+  - [Prolific Integration](#prolific-integration)
 - [CLI Commands](#cli-commands)
 - [Terminology](#terminology)
 - [Development](#development)
@@ -141,6 +144,7 @@ The `shuffle` parameter in campaign `info` controls this behavior:
     "data": [...]
 }
 ```
+Documents in `data_welcome` are not shuffled, so they are not required to contain the same models in every document.
 
 ### Showing Model Names
 
@@ -197,6 +201,33 @@ Enable a textfield for post-editing or translation tasks using the `textfield` p
 - `"visible"`: Textfield always visible
 - `"prefilled"`: Textfield visible and pre-filled with model output for post-editing
 
+### Custom MQM Taxonomy
+
+For MQM protocol campaigns, you can define a custom error taxonomy instead of using the default MQM categories. Specify `mqm_categories` in the campaign `info` section as a dictionary mapping main categories to lists of subcategories:
+
+```python
+{
+    "info": {
+        "assignment": "task-based",
+        "protocol": "MQM",
+        "mqm_categories": {
+            "": [],  # empty selection option
+            "General": ["", "Accuracy", "Fluency"],
+            "Audio-specific": ["", "Inaudible", "Background noise", "Speaker overlap", "Misinterpretation"],
+            "Style": ["", "Awkward", "Embarrassing"],
+            "Unknown": []  # category with no subcategories
+        }
+    },
+    "campaign_id": "custom_mqm_example",
+    "data": [...]
+}
+```
+
+If `mqm_categories` is not provided, the default MQM taxonomy is used. The empty-string key `""` provides an unselected state in the dropdown. Categories with empty subcategory lists (e.g., `"Unknown": []`) do not require a subcategory selection.
+
+See [examples/custom_mqm.json](examples/custom_mqm.json) for a complete example.
+
 ### Custom Instructions
 
 Set campaign-level instructions using the `instructions` field in `info` (supports HTML).
@@ -286,6 +317,34 @@ The `score_greaterthan` field specifies the index of the candidate that must hav
 See [examples/tutorial/esa_deen.json](examples/tutorial/esa_deen.json) for a mock campaign with a fully prepared ESA tutorial.
 To use it, simply extract the `data` attribute and prefix it to each task in your campaign.
 
+#### Universal Tutorial Items with `data_welcome`
+
+Use `data_welcome` to add tutorial items that users must complete before starting regular tasks. The structure is a list of documents (same as `data`). Welcome items have IDs `welcome_0`, `welcome_1`, etc., and are tracked separately via `progress_welcome`.
+
+### Form Items for User Metadata
+
+Collect user information (demographics, expertise) before annotation tasks using form items in `data_welcome`.
+Form items have `text` (the label/question) and `form` (the field type: `null`, `"string"`, `"number"`, `"choices"`, or `"script"`).
+Documents must be homogeneous: either all form items or all evaluation items.
+
+```python
+{
+    "data_welcome": [
+        [
+            {"text": "What is your native language?", "form": "string"},
+            {"text": "Rate your expertise (1-10)", "form": "number"}
+        ]
+    ]
+}
+```
+
+<img width="400" alt="Screenshot of a user form" src="https://github.com/user-attachments/assets/2310e8dc-98e9-4abf-8a27-6781b0094efe" />
+
+It is possible to automatically collect additional information from the host system using the `"script"` field type.
+Typically such a form document (or a sequence of them) is stored in `data_welcome` so that it is both mandatory and shown to all users.
+See [examples/user_info_form.json](examples/user_info_form.json).
+
 ### Single-stream Assignment
 
 All annotators draw from a shared pool with random assignment:
@@ -299,11 +358,14 @@ All annotators draw from a shared pool with random assignment:
         # ESA: error spans and scores
         "protocol": "ESA",
         "users": 50, # number of annotators (can also be a list, see below)
+        "docs_per_user": 10, # optional: show goodbye after N documents per user
     },
     "data": [...], # list of all items (shared among all annotators)
 }
 ```
 
+Set `docs_per_user` to limit how many documents each user annotates before seeing the goodbye message (for single-stream, this is the number of documents).
+
 ### Dynamic Assignment
 
 The `dynamic` assignment type intelligently selects items based on current model performance to focus annotation effort on top-performing models using contrastive comparisons.
@@ -320,11 +382,14 @@ All items must contain outputs from all models for this assignment type to work
         "dynamic_contrastive_models": 2, # how many models to compare per item (optional, default: 1)
         "dynamic_first": 5, # annotations per model before dynamic kicks in (optional, default: 5)
         "dynamic_backoff": 0.1, # probability of uniform sampling (optional, default: 0)
+        "docs_per_user": 20, # optional: show goodbye after N documents per user
     },
     "data": [...], # list of all items (shared among all annotators)
 }
 ```
 
+Set `docs_per_user` to limit how many documents each user annotates before seeing the goodbye message (for dynamic assignment, this is roughly the number of documents × models).
+
 **How it works:**
 1. Initial phase: Each model gets `dynamic_first` annotations with fully random contrastive evaluation
 2. Dynamic phase: After the initial phase, the top `dynamic_top` models (by average score) are identified
@@ -412,6 +477,14 @@ When tokens are supplied, the dashboard will try to show model rankings based on
 
 Customize the goodbye message shown to users when they complete all annotations using the `instructions_goodbye` field in campaign info. Supports arbitrary HTML for styling and formatting with variable replacement: `${TOKEN}` (completion token) and `${USER_ID}` (user ID). Default: `"If someone asks you for a token of completion, show them: ${TOKEN}"`.
 
+### Prolific Integration
+
+Use task-based assignment with Prolific. For each task, Pearmut generates a unique URL that can be uploaded to Prolific's interface. Add a redirect (on completion) to `instructions_goodbye`:
+```json
+"instructions_goodbye": "<a href='https://app.prolific.com/submissions/complete?cc=${TOKEN}'>Click here to return to Prolific</a>"
+```
+The `${TOKEN}` is automatically replaced depending on whether the user passed the attention checks (see [Attention checks](#tutorial-and-attention-checks) and [Pre-defined tokens](#pre-defined-user-ids-and-tokens)).
+
 ## Terminology
 
 - **Campaign**: An annotation project that contains configuration, data, and user assignments. Each campaign has a unique identifier and is defined in a JSON file.
pearmut-1.0.3.dist-info/RECORD ADDED
@@ -0,0 +1,20 @@
+pearmut/app.py,sha256=BWbzUHtE8G3jNkiSIW8MGlYWQeIu0PtN7wELO6E5M_0,13873
+pearmut/assignment.py,sha256=o_66Gq6L6f-JIX_lzIDJi1cAMrvLoqS5x4orwlu2GnI,32244
+pearmut/cli.py,sha256=OMNNc1gxX6ZplS0UoCiNN86v9nsIn9hkjzRoaity2Fg,29904
+pearmut/constants.py,sha256=iYONCk2kyYcKy3kikhSKyXRKZ1lWVaVFdcWh6kUYTrQ,4844
+pearmut/results_export.py,sha256=YoVE_mXDBNzsiv88CzZhZeWLMg5FWTOuH6NrbzUZQs4,5746
+pearmut/utils.py,sha256=a5nLLXk4SDASoArlXEvVqKXCbxdSgHTn_4fnJKaNa4A,4778
+pearmut/static/annotate.bundle.js,sha256=HwSm0RODsVnDmPD4xUBdmdLgOYF6-sRyPbC_EmN_EL8,121895
+pearmut/static/annotate.html,sha256=4e_3Ol-swrp_FISaxnLYS-iN05XgN5obuZyy-a5C4K8,5698
+pearmut/static/dashboard.bundle.js,sha256=27izHS7KUPMmQjw-rKGxxpu-RjaocyJYFwrZF8CcPFg,105264
+pearmut/static/dashboard.html,sha256=wzRKZCvV9p3FkL97GEMC2Y23lRvUDQt1EwuAVr8dehI,3456
+pearmut/static/favicon.svg,sha256=gVPxdBlyfyJVkiMfh8WLaiSyH4lpwmKZs8UiOeX8YW4,7347
+pearmut/static/index.bundle.js,sha256=-koQkaoRCei-H40wozYnvf0PnrAoZbtOXHotJcTn5OM,346
+pearmut/static/index.html,sha256=1ug6svDibld7Tl33PZ3e_VZUFjXKC86sXYLXWBH_TJg,930
+pearmut/static/style.css,sha256=kTzbun0LkbS0tn-bdCf-oajIQvda5YJcYeDrCV1PR-o,4885
+pearmut-1.0.3.dist-info/licenses/LICENSE,sha256=GtR6RcTdRn-P23h5pKFuWSLZrLPD0ytHAwSOBt7aLpI,1071
+pearmut-1.0.3.dist-info/METADATA,sha256=9k_RBzLNkrWNkWB8BsI4kvDveeCS0POANRl58sW3VaU,24332
+pearmut-1.0.3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+pearmut-1.0.3.dist-info/entry_points.txt,sha256=eEA9LVWsS3neQbMvL_nMvEw8I0oFudw8nQa1iqxOiWM,45
+pearmut-1.0.3.dist-info/top_level.txt,sha256=CdgtUM-SKQDt6o5g0QreO-_7XTBP9_wnHMS1P-Rl5Go,8
+pearmut-1.0.3.dist-info/RECORD,,
pearmut-1.0.3.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.9.0)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
pearmut-1.0.2.dist-info/RECORD DELETED
@@ -1,20 +0,0 @@
-pearmut/app.py,sha256=MPJ7XWqPUoMrAeqKlAG8G4zgFg44Sbm3Di0KrrsLR1A,13542
-pearmut/assignment.py,sha256=ViKnLttgsM2fswT-qX_F1VMzc6bTL9yRgMYXxAh_Ads,21382
-pearmut/cli.py,sha256=vFKtKsloJ_leprmoc9eAjV_FYgdiwadP_ofEEoxvH10,27028
-pearmut/constants.py,sha256=iYONCk2kyYcKy3kikhSKyXRKZ1lWVaVFdcWh6kUYTrQ,4844
-pearmut/results_export.py,sha256=YoVE_mXDBNzsiv88CzZhZeWLMg5FWTOuH6NrbzUZQs4,5746
-pearmut/utils.py,sha256=ZHJl_N0wDdmS1_-O7MA_COGmDr3KmNTpgEvY7RslQ1U,4463
-pearmut/static/annotate.bundle.js,sha256=VarUBW50cTORId6OnklJuqDVN0Jo662xIwTD8S8wLbI,116642
-pearmut/static/annotate.html,sha256=CGmI7opsWRPSrh8Bxkaw-H8Y7-0vj6ERTzzRz5htqjY,5697
-pearmut/static/dashboard.bundle.js,sha256=dNxFAeqpDzZcdfwfXGbYeEif26Oh8pLNgRKOdtWmVMo,106227
-pearmut/static/dashboard.html,sha256=IO1a81LFESCk4nyVYxsJ9dQ9gsg7dnBRbRNKobWaa20,3456
-pearmut/static/favicon.svg,sha256=gVPxdBlyfyJVkiMfh8WLaiSyH4lpwmKZs8UiOeX8YW4,7347
-pearmut/static/index.bundle.js,sha256=-koQkaoRCei-H40wozYnvf0PnrAoZbtOXHotJcTn5OM,346
-pearmut/static/index.html,sha256=qumZeNlbdDRe63V1Hivwkgz7T3bBhgmdEg7d8PLN1jY,930
-pearmut/static/style.css,sha256=Hj9XkXkudgHX3m4CFMwJU8kZPeCrG6-ZpwtmVuGv9VY,4225
-pearmut-1.0.2.dist-info/licenses/LICENSE,sha256=GtR6RcTdRn-P23h5pKFuWSLZrLPD0ytHAwSOBt7aLpI,1071
-pearmut-1.0.2.dist-info/METADATA,sha256=DYqf0fqnwCoKlvQ3-lPur-ABqyAEE73GNtBetaTR2Cg,20583
-pearmut-1.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-pearmut-1.0.2.dist-info/entry_points.txt,sha256=eEA9LVWsS3neQbMvL_nMvEw8I0oFudw8nQa1iqxOiWM,45
-pearmut-1.0.2.dist-info/top_level.txt,sha256=CdgtUM-SKQDt6o5g0QreO-_7XTBP9_wnHMS1P-Rl5Go,8
-pearmut-1.0.2.dist-info/RECORD,,