microlens-submit 0.16.3__py3-none-any.whl → 0.16.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- microlens_submit/__init__.py +1 -1
- microlens_submit/cli/commands/export.py +49 -8
- microlens_submit/cli/commands/init.py +16 -12
- microlens_submit/cli/commands/solutions.py +87 -34
- microlens_submit/cli/main.py +2 -1
- microlens_submit/dossier/event_page.py +20 -4
- microlens_submit/dossier/full_report.py +42 -3
- microlens_submit/dossier/solution_page.py +44 -5
- microlens_submit/dossier/utils.py +63 -0
- microlens_submit/error_messages.py +2 -1
- microlens_submit/models/solution.py +6 -0
- microlens_submit/models/submission.py +33 -2
- microlens_submit/tier_validation.py +41 -35
- microlens_submit/utils.py +8 -1
- microlens_submit/validate_parameters.py +46 -0
- {microlens_submit-0.16.3.dist-info → microlens_submit-0.16.5.dist-info}/METADATA +17 -8
- microlens_submit-0.16.5.dist-info/RECORD +33 -0
- {microlens_submit-0.16.3.dist-info → microlens_submit-0.16.5.dist-info}/WHEEL +1 -1
- microlens_submit-0.16.3.dist-info/RECORD +0 -33
- {microlens_submit-0.16.3.dist-info → microlens_submit-0.16.5.dist-info}/entry_points.txt +0 -0
- {microlens_submit-0.16.3.dist-info → microlens_submit-0.16.5.dist-info}/licenses/LICENSE +0 -0
- {microlens_submit-0.16.3.dist-info → microlens_submit-0.16.5.dist-info}/top_level.txt +0 -0
microlens_submit/__init__.py
CHANGED

microlens_submit/cli/commands/export.py
CHANGED

@@ -42,10 +42,13 @@ def export(
 
 def remove_event(
     event_id: str,
-    force: bool = typer.Option(False, "--force", help="
+    force: bool = typer.Option(False, "--force", help="Required to remove an event (prevents accidents)"),
     project_path: Path = typer.Argument(Path("."), help="Project directory"),
 ) -> None:
-    """Remove an entire event and all its solutions from the submission.
+    """Remove an entire event and all its solutions from the submission.
+
+    This action is destructive and requires --force to proceed.
+    """
     submission = load(str(project_path))
 
     if event_id not in submission.events:

@@ -57,13 +60,11 @@ def remove_event(
 
     if not force:
         typer.echo(
-            f"{symbol('warning')}
+            f"{symbol('warning')} Refusing to remove event '{event_id}' without --force "
+            f"({solution_count} solutions)."
         )
-        typer.echo("
-
-        if not confirm:
-            typer.echo(f"{symbol('error')} Operation cancelled")
-            raise typer.Exit(0)
+        typer.echo(f"{symbol('hint')} Consider deactivating solutions instead, or re-run with --force to proceed.")
+        raise typer.Exit(0)
 
     try:
         removed = submission.remove_event(event_id, force=force)

@@ -94,11 +95,33 @@ def set_repo_url(
     )
 
 
+def set_git_dir(
+    git_dir: Path = typer.Argument(..., help="Path to the git working tree"),
+    project_path: Path = typer.Argument(Path("."), help="Project directory"),
+) -> None:
+    """Set or update the git working tree path in the submission metadata."""
+    sub = load(str(project_path))
+    git_dir_path = git_dir.expanduser().resolve()
+    if not git_dir_path.exists():
+        raise typer.BadParameter(f"git_dir does not exist: {git_dir_path}")
+    sub.git_dir = str(git_dir_path)
+    sub.save()
+    console.print(
+        Panel(
+            f"Set git_dir to {git_dir_path} in {project_path}/submission.json",
+            style="bold green",
+        )
+    )
+
+
 def set_hardware_info(
     cpu: Optional[str] = typer.Option(None, "--cpu", help="CPU model/description"),
     cpu_details: Optional[str] = typer.Option(None, "--cpu-details", help="Detailed CPU information"),
     memory_gb: Optional[float] = typer.Option(None, "--memory-gb", help="Memory in GB"),
     ram_gb: Optional[float] = typer.Option(None, "--ram-gb", help="RAM in GB (alternative to --memory-gb)"),
+    gpu: Optional[str] = typer.Option(None, "--gpu", help="GPU model/description"),
+    gpu_count: Optional[int] = typer.Option(None, "--gpu-count", help="Number of GPUs"),
+    gpu_memory_gb: Optional[float] = typer.Option(None, "--gpu-memory-gb", help="GPU memory per device in GB"),
     platform: Optional[str] = typer.Option(
         None,
         "--platform",

@@ -153,6 +176,24 @@ def set_hardware_info(
         changes.append(f"Set nexus_image: {nexus_image}")
         sub.hardware_info["nexus_image"] = nexus_image
 
+    if any(value is not None for value in (gpu, gpu_count, gpu_memory_gb)):
+        gpu_info = sub.hardware_info.get("gpu")
+        if not isinstance(gpu_info, dict):
+            gpu_info = {}
+        if gpu is not None:
+            if gpu_info.get("model") != gpu:
+                changes.append(f"Set gpu.model: {gpu}")
+            gpu_info["model"] = gpu
+        if gpu_count is not None:
+            if gpu_info.get("count") != gpu_count:
+                changes.append(f"Set gpu.count: {gpu_count}")
+            gpu_info["count"] = gpu_count
+        if gpu_memory_gb is not None:
+            if gpu_info.get("memory_gb") != gpu_memory_gb:
+                changes.append(f"Set gpu.memory_gb: {gpu_memory_gb}")
+            gpu_info["memory_gb"] = gpu_memory_gb
+        sub.hardware_info["gpu"] = gpu_info
+
     # Show dry run results
     if dry_run:
        if changes:
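The new `set-git-dir` command and the GPU options both end up as plain entries in the project's submission metadata. A minimal sketch of the equivalent Python-API calls, based only on the fields visible in this diff (the path and GPU model below are illustrative, not real values):

```python
import microlens_submit

sub = microlens_submit.load("./my_submission")

# Equivalent of: microlens-submit set-git-dir /path/to/analysis-repo ./my_submission
sub.git_dir = "/path/to/analysis-repo"

# Equivalent of: microlens-submit set-hardware-info --gpu "NVIDIA A100" --gpu-count 2 --gpu-memory-gb 40 ./my_submission
sub.hardware_info = sub.hardware_info or {}
sub.hardware_info["gpu"] = {"model": "NVIDIA A100", "count": 2, "memory_gb": 40.0}

sub.save()
```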
microlens_submit/cli/commands/init.py
CHANGED

@@ -17,6 +17,7 @@ def init(
     team_name: str = typer.Option(..., help="Team name"),
     tier: str = typer.Option(..., help="Challenge tier"),
     project_path: Path = typer.Argument(Path("."), help="Project directory"),
+    show_warnings: bool = True,
 ) -> None:
     """Create a new submission project in the specified directory.
 

@@ -29,7 +30,7 @@ def init(
 
     Args:
         team_name: Name of the participating team (e.g., "Team Alpha").
-        tier: Challenge tier level (e.g., "
+        tier: Challenge tier level (e.g., "beginner", "experienced").
         project_path: Directory where the project will be created.
             Defaults to current directory if not specified.
 

@@ -38,10 +39,10 @@ def init(
 
     Example:
         # Create project in current directory
-        microlens-submit init --team-name "Team Alpha" --tier "
+        microlens-submit init --team-name "Team Alpha" --tier "experienced"
 
         # Create project in specific directory
-        microlens-submit init --team-name "Team Beta" --tier "
+        microlens-submit init --team-name "Team Beta" --tier "beginner" ./my_submission
 
         # Project structure created:
         # ./my_submission/

@@ -100,12 +101,15 @@ def init(
     )
 
     # Run warnings-only validation
-
-
-
-
-
-
+    if show_warnings:
+        warnings = sub.run_validation_warnings()
+        if warnings:
+            console.print(f"[yellow]{symbol('warning')} Project initialized with warnings:[/yellow]")
+            for warning in warnings:
+                console.print(f"  [yellow]• {warning}[/yellow]")
+            console.print(
+                f"[yellow]{symbol('hint')} These warnings will become errors when saving or exporting.[/yellow]"
+            )
 
     # Try to save, but don't fail if there are validation errors
     try:

@@ -134,13 +138,13 @@ def nexus_init(
 
     Args:
         team_name: Name of the participating team (e.g., "Team Alpha").
-        tier: Challenge tier level (e.g., "
+        tier: Challenge tier level (e.g., "beginner", "experienced").
         project_path: Directory where the project will be created.
             Defaults to current directory if not specified.
 
     Example:
         # Initialize project with Nexus platform info
-        microlens-submit nexus-init --team-name "Team Alpha" --tier "
+        microlens-submit nexus-init --team-name "Team Alpha" --tier "experienced" ./project
 
         # This will automatically detect:
         # - CPU model from /proc/cpuinfo

@@ -152,7 +156,7 @@ def nexus_init(
     environment. It will silently skip any environment information that
     cannot be detected (e.g., if running outside of Nexus).
     """
-    init(team_name=team_name, tier=tier, project_path=project_path)
+    init(team_name=team_name, tier=tier, project_path=project_path, show_warnings=False)
     sub = load(str(project_path))
     sub.autofill_nexus_info()
 
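The new `show_warnings` branch in `init` reuses `Submission.run_validation_warnings()` (see the `models/submission.py` hunks below), and `nexus_init` now passes `show_warnings=False` so the warning pass only runs for plain `init`. A hedged sketch of invoking the same check directly from Python:

```python
from microlens_submit.utils import load  # the CLI modules import load from microlens_submit.utils

sub = load("./my_submission")
for warning in sub.run_validation_warnings():
    # Non-blocking issues; per the CLI hint they become errors when saving or exporting.
    print(f"warning: {warning}")
```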
microlens_submit/cli/commands/solutions.py
CHANGED

@@ -2,8 +2,13 @@
 
 import json
 import os
+import re
+import shlex
+import shutil
+import subprocess
+import sys
 from pathlib import Path
-from typing import Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple
 
 import typer
 from rich.console import Console

@@ -16,6 +21,38 @@ from microlens_submit.utils import import_solutions_from_csv, load
 console = Console()
 
 
+_NUMERIC_RE = re.compile(r"^[+-]?((\d+(\.\d*)?)|(\.\d+))([eE][+-]?\d+)?$")
+
+
+def _parse_cli_value(value: str) -> Any:
+    """Parse a CLI value using JSON, with a numeric fallback for .001-style input."""
+    try:
+        return json.loads(value)
+    except json.JSONDecodeError:
+        if _NUMERIC_RE.match(value.strip()):
+            try:
+                if re.match(r"^[+-]?\d+$", value.strip()):
+                    return int(value)
+                return float(value)
+            except ValueError:
+                pass
+        return value
+
+
+def _run_editor(editor_cmd: str, notes_file: Path) -> bool:
+    parts = shlex.split(editor_cmd)
+    if not parts:
+        return False
+    if os.name == "nt" and parts[0].lower() in ("code", "code.exe"):
+        if "--wait" not in parts and "-w" not in parts:
+            parts.append("--wait")
+    try:
+        subprocess.run(parts + [str(notes_file)], check=False)
+        return True
+    except FileNotFoundError:
+        return False
+
+
 def _parse_pairs(pairs: Optional[List[str]]) -> Optional[Dict]:
     """Convert CLI key=value options into a dictionary."""
     if not pairs:
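`_parse_cli_value` tries `json.loads` first and only falls back to the numeric regex for inputs such as `.001` that are numbers but not valid JSON. A quick illustration of the resulting behaviour (importing the private helper here is purely for demonstration):

```python
from microlens_submit.cli.commands.solutions import _parse_cli_value

_parse_cli_value("0.5")      # 0.5      (valid JSON number)
_parse_cli_value(".001")     # 0.001    (not valid JSON; numeric fallback uses float())
_parse_cli_value("1e-3")     # 0.001    (valid JSON)
_parse_cli_value("42")       # 42       (parsed as an int)
_parse_cli_value("[1, 2]")   # [1, 2]   (JSON list)
_parse_cli_value("north")    # "north"  (left as a plain string)
```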
@@ -25,10 +62,7 @@ def _parse_pairs(pairs: Optional[List[str]]) -> Optional[Dict]:
         if "=" not in item:
             raise typer.BadParameter(f"Invalid format: {item}")
         key, value = item.split("=", 1)
-
-            out[key] = json.loads(value)
-        except json.JSONDecodeError:
-            out[key] = value
+        out[key] = _parse_cli_value(value)
     return out
 
 

@@ -219,10 +253,7 @@ def add_solution(
         if "=" not in p:
             raise typer.BadParameter(f"Invalid parameter format: {p}")
         key, value = p.split("=", 1)
-
-            params[key] = json.loads(value)
-        except json.JSONDecodeError:
-            params[key] = value
+        params[key] = _parse_cli_value(value)
     allowed_model_types = [
         "1S1L",
         "1S2L",

@@ -254,7 +285,7 @@ def add_solution(
     sol.relative_probability = relative_probability
     sol.n_data_points = n_data_points
     if cpu_hours is not None or wall_time_hours is not None:
-        sol.set_compute_info(cpu_hours=cpu_hours, wall_time_hours=wall_time_hours)
+        sol.set_compute_info(cpu_hours=cpu_hours, wall_time_hours=wall_time_hours, git_dir=sub.git_dir)
     sol.lightcurve_plot_path = str(lightcurve_plot_path) if lightcurve_plot_path else None
     sol.lens_plane_plot_path = str(lens_plane_plot_path) if lens_plane_plot_path else None
     # Handle notes file logic

@@ -368,10 +399,13 @@ def activate(
 
 def remove_solution(
     solution_id: str,
-    force: bool = typer.Option(False, "--force", help="
+    force: bool = typer.Option(False, "--force", help="Required to remove a solution (prevents accidents)"),
     project_path: Path = typer.Argument(Path("."), help="Project directory"),
 ) -> None:
-    """Completely remove a solution from the submission.
+    """Completely remove a solution from the submission.
+
+    This action is destructive and requires --force to proceed.
+    """
     submission = load(project_path)
 
     # Find the solution across all events

@@ -387,6 +421,16 @@ def remove_solution(
         console.print(f"[red]Error: Solution {solution_id} not found[/red]")
         raise typer.Exit(1)
 
+    if not force:
+        console.print(
+            f"[yellow]{symbol('warning')} Refusing to remove solution {solution_id[:8]}... without --force.[/yellow]"
+        )
+        console.print(
+            f"[blue]{symbol('hint')} Consider using deactivate to keep the solution, "
+            "or re-run with --force to proceed.[/blue]"
+        )
+        raise typer.Exit(0)
+
     try:
         removed = submission.events[event_id].remove_solution(solution_id, force=force)
         if removed:

@@ -561,16 +605,14 @@ def edit_solution(
         target_solution.set_compute_info(
             cpu_hours=cpu_hours if cpu_hours is not None else old_cpu,
             wall_time_hours=(wall_time_hours if wall_time_hours is not None else old_wall),
+            git_dir=sub.git_dir,
         )
     if param:
         for p in param:
             if "=" not in p:
                 raise typer.BadParameter(f"Invalid parameter format: {p}")
             key, value = p.split("=", 1)
-
-                new_value = json.loads(value)
-            except json.JSONDecodeError:
-                new_value = value
+            new_value = _parse_cli_value(value)
             old_value = target_solution.parameters.get(key)
             if old_value != new_value:
                 changes.append(f"Update parameter {key}: {old_value} {arrow} {new_value}")

@@ -582,10 +624,7 @@ def edit_solution(
             if "=" not in p:
                 raise typer.BadParameter(f"Invalid uncertainty format: {p}")
             key, value = p.split("=", 1)
-
-                new_value = json.loads(value)
-            except json.JSONDecodeError:
-                new_value = value
+            new_value = _parse_cli_value(value)
             old_value = target_solution.parameter_uncertainties.get(key)
             if old_value != new_value:
                 changes.append(f"Update uncertainty {key}: {old_value} {arrow} {new_value}")

@@ -636,21 +675,35 @@ def edit_notes(
             notes_file.parent.mkdir(parents=True, exist_ok=True)
             if not notes_file.exists():
                 notes_file.write_text("", encoding="utf-8")
-            editor = os.environ.get("EDITOR"
-            if editor:
-
+            editor = os.environ.get("EDITOR") or os.environ.get("VISUAL")
+            if editor and _run_editor(editor, notes_file):
+                return
+            fallbacks = ["nano", "vi", "vim", "code"]
+            if os.name == "nt":
+                fallbacks = ["code", "notepad", "notepad.exe"]
+            for fallback in fallbacks:
+                if shutil.which(fallback):
+                    if _run_editor(fallback, notes_file):
+                        return
+            if os.name == "nt":
+                try:
+                    os.startfile(notes_file)  # type: ignore[attr-defined]
+                    return
+                except OSError:
+                    pass
+            elif sys.platform == "darwin":
+                if shutil.which("open"):
+                    subprocess.run(["open", "-W", str(notes_file)], check=False)
+                    return
             else:
-
-
-
-
-
-
-
-
-                    style="bold red",
-                )
-                raise typer.Exit(code=1)
+                if shutil.which("xdg-open"):
+                    subprocess.run(["xdg-open", str(notes_file)], check=False)
+                    return
+            console.print(
+                f"Could not find an editor to open {notes_file}",
+                style="bold red",
+            )
+            raise typer.Exit(code=1)
             return
         console.print(f"Solution {solution_id} not found", style="bold red")
         raise typer.Exit(code=1)
microlens_submit/cli/main.py
CHANGED
@@ -17,7 +17,7 @@ and scripted usage patterns.
 
 **Example Workflow:**
     # Initialize a new project
-    microlens-submit init --team-name "Team Alpha" --tier "
+    microlens-submit init --team-name "Team Alpha" --tier "experienced" ./my_project
 
     # Add a solution
     microlens-submit add-solution EVENT001 1S1L ./my_project \

@@ -117,4 +117,5 @@ app.command("generate-dossier")(dossier.generate_dossier)
 app.command("export")(export.export)
 app.command("remove-event")(export.remove_event)
 app.command("set-repo-url")(export.set_repo_url)
+app.command("set-git-dir")(export.set_git_dir)
 app.command("set-hardware-info")(export.set_hardware_info)
microlens_submit/dossier/event_page.py
CHANGED

@@ -12,6 +12,7 @@ from pathlib import Path
 from .. import __version__
 from ..models import Event, Submission
 from .solution_page import generate_solution_page
+from .utils import resolve_dossier_asset_path
 
 
 def generate_event_page(event: Event, submission: Submission, output_dir: Path) -> None:

@@ -50,7 +51,17 @@ def generate_event_page(event: Event, submission: Submission, output_dir: Path)
     to individual solution pages.
     """
     # Prepare output directory (already created)
-
+    project_root = Path(submission.project_path)
+    event_data_link = ""
+    if hasattr(event, "event_data_path") and event.event_data_path:
+        event_data_link = resolve_dossier_asset_path(
+            event.event_data_path,
+            project_root,
+            output_dir,
+            subdir="event-data",
+            prefix=f"{event.event_id}_event_data",
+        )
+    html = _generate_event_page_content(event, submission, event_data_link=event_data_link)
     with (output_dir / f"{event.event_id}.html").open("w", encoding="utf-8") as f:
         f.write(html)
 

@@ -59,7 +70,12 @@ def generate_event_page(event: Event, submission: Submission, output_dir: Path)
         generate_solution_page(sol, event, submission, output_dir)
 
 
-def _generate_event_page_content(
+def _generate_event_page_content(
+    event: Event,
+    submission: Submission,
+    *,
+    event_data_link: str = "",
+) -> str:
     """Generate the HTML content for an event dossier page.
 
     Creates the complete HTML content for a single event page, including

@@ -163,10 +179,10 @@ def _generate_event_page_content(event: Event, submission: Submission) -> str:
     )
     # Optional raw data link
     raw_data_html = ""
-    if
+    if event_data_link:
         raw_data_html = (
             f'<p class="text-rtd-text">Raw Event Data: '
-            f'<a href="{
+            f'<a href="{event_data_link}" '
             f'class="text-rtd-accent hover:underline">Download Data</a></p>'
         )
     # HTML content
microlens_submit/dossier/full_report.py
CHANGED

@@ -15,6 +15,7 @@ from ..models.solution import Solution
 from .dashboard import _generate_dashboard_content
 from .event_page import _generate_event_page_content
 from .solution_page import _generate_solution_page_content
+from .utils import resolve_dossier_asset_path
 
 
 def generate_full_dossier_report_html(submission: Submission, output_dir: Path) -> None:

@@ -63,19 +64,57 @@ def generate_full_dossier_report_html(submission: Submission, output_dir: Path)
     all_html_sections.append('<hr class="my-8 border-t-2 border-rtd-accent">')  # Divider after dashboard
 
     # Events and solutions
+    project_root = Path(submission.project_path)
     for event in submission.events.values():
-
+        event_data_link = ""
+        if hasattr(event, "event_data_path") and event.event_data_path:
+            event_data_link = resolve_dossier_asset_path(
+                event.event_data_path,
+                project_root,
+                output_dir,
+                subdir="event-data",
+                prefix=f"{event.event_id}_event_data",
+            )
+        event_html = _generate_event_page_content(event, submission, event_data_link=event_data_link)
         event_body = extract_main_content_body(event_html, section_type="event", section_id=event.event_id)
         all_html_sections.append(event_body)
         all_html_sections.append('<hr class="my-8 border-t-2 border-rtd-accent">')  # Divider after event
 
         for sol in event.get_active_solutions():
-
+            lc_plot = resolve_dossier_asset_path(
+                sol.lightcurve_plot_path,
+                project_root,
+                output_dir,
+                subdir="plots",
+                prefix=f"{event.event_id}_{sol.solution_id}_lightcurve",
+            )
+            lens_plot = resolve_dossier_asset_path(
+                sol.lens_plane_plot_path,
+                project_root,
+                output_dir,
+                subdir="plots",
+                prefix=f"{event.event_id}_{sol.solution_id}_lens",
+            )
+            posterior = resolve_dossier_asset_path(
+                sol.posterior_path,
+                project_root,
+                output_dir,
+                subdir="posteriors",
+                prefix=f"{event.event_id}_{sol.solution_id}_posterior",
+            )
+            sol_html = _generate_solution_page_content(
+                sol,
+                event,
+                submission,
+                lc_plot=lc_plot,
+                lens_plot=lens_plot,
+                posterior=posterior,
+            )
             sol_body = extract_main_content_body(
                 sol_html,
                 section_type="solution",
                 section_id=sol.solution_id,
-                project_root=
+                project_root=project_root,
                 solution=sol,
             )
             all_html_sections.append(sol_body)
microlens_submit/dossier/solution_page.py
CHANGED

@@ -8,6 +8,7 @@ notes rendering, and evaluator-only sections.
 
 from datetime import datetime
 from pathlib import Path
+from typing import Optional
 
 import markdown
 

@@ -15,6 +16,7 @@ from .. import __version__
 from ..models.event import Event
 from ..models.solution import Solution
 from ..models.submission import Submission
+from .utils import resolve_dossier_asset_path
 
 
 def generate_solution_page(solution: Solution, event: Event, submission: Submission, output_dir: Path) -> None:

@@ -55,12 +57,49 @@ def generate_solution_page(solution: Solution, event: Event, submission: Submiss
     Notes are rendered with syntax highlighting for code blocks.
     """
     # Prepare output directory (already created)
-
+    project_root = Path(submission.project_path)
+    lc_plot = resolve_dossier_asset_path(
+        solution.lightcurve_plot_path,
+        project_root,
+        output_dir,
+        subdir="plots",
+        prefix=f"{event.event_id}_{solution.solution_id}_lightcurve",
+    )
+    lens_plot = resolve_dossier_asset_path(
+        solution.lens_plane_plot_path,
+        project_root,
+        output_dir,
+        subdir="plots",
+        prefix=f"{event.event_id}_{solution.solution_id}_lens",
+    )
+    posterior = resolve_dossier_asset_path(
+        solution.posterior_path,
+        project_root,
+        output_dir,
+        subdir="posteriors",
+        prefix=f"{event.event_id}_{solution.solution_id}_posterior",
+    )
+    html = _generate_solution_page_content(
+        solution,
+        event,
+        submission,
+        lc_plot=lc_plot,
+        lens_plot=lens_plot,
+        posterior=posterior,
+    )
     with (output_dir / f"{solution.solution_id}.html").open("w", encoding="utf-8") as f:
         f.write(html)
 
 
-def _generate_solution_page_content(
+def _generate_solution_page_content(
+    solution: Solution,
+    event: Event,
+    submission: Submission,
+    *,
+    lc_plot: Optional[str] = None,
+    lens_plot: Optional[str] = None,
+    posterior: Optional[str] = None,
+) -> str:
     """Generate the HTML content for a solution dossier page.
 
     Creates the complete HTML content for a single solution page, including

@@ -138,9 +177,9 @@ def _generate_solution_page_content(solution: Solution, event: Event, submission
     # Higher-order effects
     hoe_str = ", ".join(solution.higher_order_effects) if solution.higher_order_effects else "None"
     # Plot paths (relative to solution page)
-    lc_plot = solution.lightcurve_plot_path or ""
-    lens_plot = solution.lens_plane_plot_path or ""
-    posterior = solution.posterior_path or ""
+    lc_plot = lc_plot if lc_plot is not None else (solution.lightcurve_plot_path or "")
+    lens_plot = lens_plot if lens_plot is not None else (solution.lens_plane_plot_path or "")
+    posterior = posterior if posterior is not None else (solution.posterior_path or "")
     # Physical parameters table
     phys_rows = []
     phys = solution.physical_parameters or {}
microlens_submit/dossier/utils.py
CHANGED

@@ -6,7 +6,11 @@ generation package, including hardware formatting, GitHub URL parsing,
 and other helper functions.
 """
 
+import hashlib
+import shutil
+from pathlib import Path
 from typing import Any, Dict, Optional
+from urllib.parse import quote, urlparse
 
 
 def format_hardware_info(hardware_info: Optional[Dict[str, Any]]) -> str:

@@ -109,3 +113,62 @@ def extract_github_repo_name(repo_url: str) -> str:
             return f"{parts[-2]}/{parts[-1]}"
 
     return None
+
+
+def resolve_dossier_asset_path(
+    path_value: Optional[str],
+    project_root: Path,
+    output_dir: Path,
+    *,
+    subdir: str,
+    prefix: Optional[str] = None,
+) -> str:
+    """Resolve and copy a local asset into the dossier assets folder.
+
+    If the input path is a URL, it is returned unchanged. If it is a local
+    filesystem path, it is resolved relative to the project root, copied into
+    the dossier assets folder, and returned as a URL-encoded relative path.
+
+    Args:
+        path_value: The original path or URL string.
+        project_root: Root directory of the submission project.
+        output_dir: Dossier output directory containing the HTML files.
+        subdir: Subdirectory under assets/ for the copied file.
+        prefix: Optional prefix for the copied file name to avoid collisions.
+
+    Returns:
+        str: A URL or a relative path suitable for HTML src/href attributes.
+    """
+    if not path_value:
+        return ""
+
+    parsed = urlparse(path_value)
+    if parsed.scheme in {"http", "https", "data"}:
+        return path_value
+
+    if parsed.scheme == "file":
+        source_path = Path(parsed.path)
+    else:
+        source_path = Path(path_value).expanduser()
+
+    if not source_path.is_absolute():
+        source_path = (project_root / source_path).resolve()
+    else:
+        source_path = source_path.resolve()
+
+    if not source_path.exists():
+        return path_value
+
+    assets_dir = output_dir / "assets" / subdir
+    assets_dir.mkdir(parents=True, exist_ok=True)
+
+    digest = hashlib.sha1(str(source_path).encode("utf-8")).hexdigest()[:8]
+    safe_prefix = prefix or "asset"
+    dest_name = f"{safe_prefix}_{digest}_{source_path.name}"
+    dest_path = assets_dir / dest_name
+
+    if not dest_path.exists():
+        shutil.copy2(source_path, dest_path)
+
+    rel_path = dest_path.relative_to(output_dir).as_posix()
+    return quote(rel_path)
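In practice the helper is called with paths exactly as they are stored on the solution or event, so relative paths are interpreted against the project root. A usage sketch (the file names below are illustrative):

```python
from pathlib import Path
from microlens_submit.dossier.utils import resolve_dossier_asset_path

href = resolve_dossier_asset_path(
    "plots/EVENT001_lightcurve.png",    # as stored in lightcurve_plot_path, relative to the project
    Path("./my_submission"),            # project_root
    Path("./my_submission/dossier"),    # output_dir containing the generated HTML
    subdir="plots",
    prefix="EVENT001_sol1_lightcurve",
)
# A local file is copied to <output_dir>/assets/plots/<prefix>_<sha1[:8]>_<name> and the
# URL-encoded relative path is returned; http(s)/data URLs and missing files pass through unchanged.
```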
microlens_submit/error_messages.py
CHANGED

@@ -76,7 +76,8 @@ def get_parameter_suggestions(model_type: str, user_param: str) -> List[str]:
     for canonical, typos in common_typos.items():
         for typo in typos:
             if user_param.lower() == typo.lower():
-
+                if user_param != canonical:
+                    suggestions.append(f"Did you mean '{canonical}' instead of '{user_param}'?")
     # Also suggest case sensitivity if relevant
     for canonical in common_typos.keys():
         if user_param.lower() == canonical.lower() and user_param != canonical:
microlens_submit/models/solution.py
CHANGED

@@ -188,6 +188,7 @@ class Solution(BaseModel):
         self,
         cpu_hours: Optional[float] = None,
         wall_time_hours: Optional[float] = None,
+        git_dir: Optional[str] = None,
     ) -> None:
         """Record compute metadata and capture environment details.
 

@@ -199,6 +200,7 @@ class Solution(BaseModel):
         Args:
             cpu_hours: Total CPU time consumed by the model fit in hours.
             wall_time_hours: Real-world time consumed by the fit in hours.
+            git_dir: Optional path to the code repository for git metadata capture.
 
         Example:
             >>> solution = event.add_solution("1S1L", {"t0": 2459123.5, "u0": 0.1})

@@ -240,23 +242,27 @@ class Solution(BaseModel):
 
         # Capture Git repository information
         try:
+            git_cwd = Path(git_dir).expanduser().resolve() if git_dir else None
             commit = subprocess.run(
                 ["git", "rev-parse", "HEAD"],
                 capture_output=True,
                 text=True,
                 check=True,
+                cwd=git_cwd,
             ).stdout.strip()
             branch = subprocess.run(
                 ["git", "rev-parse", "--abbrev-ref", "HEAD"],
                 capture_output=True,
                 text=True,
                 check=True,
+                cwd=git_cwd,
             ).stdout.strip()
             status = subprocess.run(
                 ["git", "status", "--porcelain"],
                 capture_output=True,
                 text=True,
                 check=True,
+                cwd=git_cwd,
             ).stdout.strip()
             self.compute_info["git_info"] = {
                 "commit": commit,
microlens_submit/models/submission.py
CHANGED

@@ -9,10 +9,12 @@ import json
 import logging
 import math
 import os
+import platform
 import zipfile
 from pathlib import Path
 from typing import Dict, List, Optional
 
+import psutil
 from pydantic import BaseModel, Field
 
 from ..text_symbols import symbol

@@ -31,10 +33,11 @@ class Submission(BaseModel):
     Attributes:
         project_path: Root directory where submission files are stored.
         team_name: Name of the participating team (required for validation).
-        tier: Challenge tier for the submission (e.g., "
+        tier: Challenge tier for the submission (e.g., "beginner", "experienced") (required for validation).
         hardware_info: Dictionary describing the compute platform (required for validation).
         events: Mapping of event IDs to :class:`Event` instances.
         repo_url: GitHub repository URL for the team codebase (required for validation).
+        git_dir: Optional path to the codebase (used for git metadata capture).
 
     Example:
         >>> from microlens_submit import load

@@ -44,7 +47,7 @@ class Submission(BaseModel):
         >>>
         >>> # Set submission metadata
         >>> submission.team_name = "Team Alpha"
-        >>> submission.tier = "
+        >>> submission.tier = "experienced"
         >>> submission.repo_url = "https://github.com/team/microlens-submit"
         >>>
         >>> # Add events and solutions

@@ -82,6 +85,7 @@ class Submission(BaseModel):
     hardware_info: Optional[dict] = None
     events: Dict[str, Event] = Field(default_factory=dict)
     repo_url: Optional[str] = None
+    git_dir: Optional[str] = None
 
     def run_validation_warnings(self) -> List[str]:
         """Validate the submission and return warnings only (non-blocking issues).

@@ -233,12 +237,23 @@ class Submission(BaseModel):
     def autofill_nexus_info(self) -> None:
         if self.hardware_info is None:
             self.hardware_info = {}
+        try:
+            self.hardware_info.setdefault("platform", platform.platform())
+            self.hardware_info.setdefault("os", platform.system())
+        except Exception as exc:
+            logging.debug("Failed to read platform info: %s", exc)
         try:
             image = os.environ.get("JUPYTER_IMAGE_SPEC")
             if image:
                 self.hardware_info["nexus_image"] = image
         except Exception as exc:
             logging.debug("Failed to read JUPYTER_IMAGE_SPEC: %s", exc)
+        try:
+            server_name = os.environ.get("JUPYTERHUB_SERVER_NAME")
+            if server_name:
+                self.hardware_info["server_name"] = server_name
+        except Exception as exc:
+            logging.debug("Failed to read JUPYTERHUB_SERVER_NAME: %s", exc)
         try:
             with open("/proc/cpuinfo", "r", encoding="utf-8") as fh:
                 for line in fh:

@@ -256,6 +271,22 @@ class Submission(BaseModel):
                         break
         except OSError as exc:
             logging.debug("Failed to read /proc/meminfo: %s", exc)
+        try:
+            if "memory_gb" not in self.hardware_info:
+                mem_bytes = psutil.virtual_memory().total
+                self.hardware_info["memory_gb"] = round(mem_bytes / 1024**3, 2)
+        except Exception as exc:
+            logging.debug("Failed to read memory via psutil: %s", exc)
+        try:
+            if "cpu_details" not in self.hardware_info:
+                cpu = platform.processor() or platform.machine()
+                freq = psutil.cpu_freq()
+                if freq and cpu:
+                    self.hardware_info["cpu_details"] = f"{cpu} ({freq.max:.0f} MHz max)"
+                elif cpu:
+                    self.hardware_info["cpu_details"] = cpu
+        except Exception as exc:
+            logging.debug("Failed to read CPU via psutil: %s", exc)
 
     def _get_alias_lookup_path(self) -> Path:
         return Path(self.project_path) / "aliases.json"
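With the new `platform`/`psutil` fallbacks, `autofill_nexus_info()` now fills several keys even when `/proc/cpuinfo` and `/proc/meminfo` are unavailable. A hedged sketch of what to expect (the actual values depend on the host):

```python
import microlens_submit

sub = microlens_submit.load("./my_submission")
sub.autofill_nexus_info()

# Keys populated when detectable, per the hunks above:
#   platform, os               via platform.platform() / platform.system()
#   nexus_image, server_name   from JUPYTER_IMAGE_SPEC / JUPYTERHUB_SERVER_NAME
#   memory_gb                  from psutil.virtual_memory().total, rounded to GB
#   cpu_details                from platform.processor()/machine() plus psutil.cpu_freq()
print(sub.hardware_info)
```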
microlens_submit/tier_validation.py
CHANGED

@@ -11,9 +11,8 @@ The module defines:
 - Tier-specific validation logic
 
 **Supported Tiers:**
--
--
-- advanced: Advanced challenge tier with all events
+- beginner: Beginner challenge tier with limited event set
+- experienced: Experienced challenge tier with full event set
 - test: Testing tier for development
 - 2018-test: 2018 test events tier
 - None: No validation tier (skips event validation)

@@ -22,11 +21,11 @@ Example:
     >>> from microlens_submit.tier_validation import validate_event_id, TIER_DEFINITIONS
     >>>
     >>> # Check if an event is valid for a tier
-    >>> is_valid = validate_event_id("EVENT001", "
+    >>> is_valid = validate_event_id("EVENT001", "beginner")
     >>> if is_valid:
-    ...     print("Event is valid for
-
-    ...     print("Event is not valid for
+    ...     print("Event is valid for beginner tier")
+    ... else:
+    ...     print("Event is not valid for beginner tier")
 
     >>> # Get available tiers
     >>> print("Available tiers:", list(TIER_DEFINITIONS.keys()))

@@ -40,19 +39,22 @@ from typing import Dict, List, Optional, Set
 
 # Tier definitions with their associated event lists
 TIER_DEFINITIONS = {
-    "
-        "description": "
-        "
-
-            "EVENT001",
-            "EVENT002",
-            "EVENT003",
-        ],
+    "beginner": {
+        "description": "Beginner challenge tier with limited event set",
+        "event_prefix": "rmdc26_",
+        "event_range": [0, 200],
     },
-    "
-        "description": "
+    "experienced": {
+        "description": "Experienced challenge tier with full event set",
+        "event_prefix": "rmdc26_",
+        "event_range": [0, 2000],
+    },
+    "test": {
+        "description": "Testing tier for development",
         "event_list": [
-            # Add
+            # Add test events here
+            "evt",
+            "test-event",
             "EVENT001",
             "EVENT002",
             "EVENT003",

@@ -62,16 +64,10 @@ TIER_DEFINITIONS = {
             "EVENT007",
         ],
     },
-    "test": {
-        "description": "Testing tier for development",
-        "event_list": [
-            # Add test events here
-            "evt",
-            "test-event",
-        ],
-    },
     "2018-test": {
         "description": "2018 test events tier",
+        "event_prefix": "ulwdc1_",
+        "event_range": [0, 293],
         "event_list": [
             # Add 2018 test events here
             "2018-EVENT-001",

@@ -101,8 +97,8 @@ def get_tier_event_list(tier: str) -> Set[str]:
         ValueError: If the tier is not defined.
 
     Example:
-        >>> events = get_tier_event_list("
-        >>> print(f"
+        >>> events = get_tier_event_list("beginner")
+        >>> print(f"Beginner tier has {len(events)} events")
         >>> print("EVENT001" in events)
     """
     if tier not in TIER_DEFINITIONS:

@@ -110,7 +106,17 @@ def get_tier_event_list(tier: str) -> Set[str]:
 
     # Use cache for performance
     if tier not in _EVENT_LIST_CACHE:
-
+        event_list = list(TIER_DEFINITIONS[tier].get("event_list", []))
+        if "event_prefix" in TIER_DEFINITIONS[tier] and "event_range" in TIER_DEFINITIONS[tier]:
+            event_prefix = str(TIER_DEFINITIONS[tier]["event_prefix"])
+            event_range = tuple(TIER_DEFINITIONS[tier]["event_range"])
+            for i in range(event_range[0], event_range[1] + 1):
+                if tier == "2018-test":
+                    event_list.append(f"{event_prefix}{i:03d}")
+                else:
+                    event_list.append(f"{event_prefix}{i:04d}")
+
+        _EVENT_LIST_CACHE[tier] = set(event_list)
 
     return _EVENT_LIST_CACHE[tier]
 

@@ -126,11 +132,11 @@ def validate_event_id(event_id: str, tier: str) -> bool:
         bool: True if the event ID is valid for the tier, False otherwise.
 
     Example:
-        >>> is_valid = validate_event_id("EVENT001", "
+        >>> is_valid = validate_event_id("EVENT001", "beginner")
         >>> if is_valid:
-        ...     print("Event is valid for
+        ...     print("Event is valid for beginner tier")
         >>> else:
-        ...     print("Event is not valid for
+        ...     print("Event is not valid for beginner tier")
     """
     # Skip validation for "None" tier or if tier is not defined
     if tier == "None" or tier not in TIER_DEFINITIONS:

@@ -151,7 +157,7 @@ def get_event_validation_error(event_id: str, tier: str) -> Optional[str]:
         Optional[str]: Error message if the event is invalid, None if valid.
 
     Example:
-        >>> error = get_event_validation_error("INVALID_EVENT", "
+        >>> error = get_event_validation_error("INVALID_EVENT", "beginner")
         >>> if error:
         ...     print(f"Validation error: {error}")
         >>> else:

@@ -199,8 +205,8 @@ def get_tier_description(tier: str) -> str:
         ValueError: If the tier is not defined.
 
     Example:
-        >>> desc = get_tier_description("
-        >>> print(f"
+        >>> desc = get_tier_description("beginner")
+        >>> print(f"Beginner tier: {desc}")
     """
     if tier not in TIER_DEFINITIONS:
         raise ValueError(f"Unknown tier: {tier}. Available tiers: {list(TIER_DEFINITIONS.keys())}")
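With `event_prefix`/`event_range`, the cached event set for a tier is generated rather than listed by hand; `2018-test` IDs are zero-padded to three digits and the rmdc26 tiers to four. A small check of the generated IDs:

```python
from microlens_submit.tier_validation import get_tier_event_list, validate_event_id

beginner = get_tier_event_list("beginner")
len(beginner)                                     # 201 events: rmdc26_0000 .. rmdc26_0200
"rmdc26_0042" in beginner                         # True
validate_event_id("rmdc26_1500", "beginner")      # False (outside the 0-200 range)
validate_event_id("rmdc26_1500", "experienced")   # True (experienced spans 0-2000)
```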
microlens_submit/utils.py
CHANGED
@@ -36,7 +36,7 @@ def load(project_path: str) -> Submission:
     >>>
     >>> # Set submission metadata
     >>> submission.team_name = "Team Alpha"
-    >>> submission.tier = "
+    >>> submission.tier = "experienced"
     >>> submission.repo_url = "https://github.com/team/repo"
     >>>
     >>> # Add an event and solution

@@ -191,6 +191,11 @@ def import_solutions_from_csv(
     )
 
     for row_num, row in enumerate(reader, start=header_row + 2):
+        row_has_data = any((key and str(key).strip()) for key in row.keys()) or any(
+            (value is not None and str(value).strip()) for value in row.values()
+        )
+        if not row_has_data:
+            continue
         stats["total_rows"] += 1
 
         try:

@@ -255,6 +260,8 @@ def import_solutions_from_csv(
             # Parse parameters
             parameters = {}
             for key, value in row.items():
+                if key is None:
+                    continue
                 if key not in [
                     "event_id",
                     "solution_id",
microlens_submit/validate_parameters.py
CHANGED

@@ -57,6 +57,7 @@ Note:
     custom parameters and future model types.
 """
 
+import re
 from typing import Any, Dict, List, Optional
 
 MODEL_DEFINITIONS = {

@@ -95,6 +96,27 @@ MODEL_DEFINITIONS = {
     # },
 }
 
+_FLUX_PARAM_RE = re.compile(r"^F(?P<band>\d+)_S(?:[12])?$|^F(?P<band_b>\d+)_B$")
+
+
+def _find_flux_params(parameters: Dict[str, Any]) -> List[str]:
+    """Return a list of parameters that look like band-specific flux terms."""
+    return [param for param in parameters.keys() if isinstance(param, str) and _FLUX_PARAM_RE.match(param)]
+
+
+def _infer_bands_from_flux_params(flux_params: List[str]) -> List[str]:
+    """Infer band identifiers from flux parameter names."""
+    bands = set()
+    for param in flux_params:
+        match = _FLUX_PARAM_RE.match(param)
+        if not match:
+            continue
+        band = match.group("band") or match.group("band_b")
+        if band is not None:
+            bands.add(band)
+    return sorted(bands)
+
+
 HIGHER_ORDER_EFFECT_DEFINITIONS = {
     "parallax": {
         "description": "Microlens parallax effect",

@@ -504,6 +526,16 @@ def check_solution_completeness(
             messages.append(f"Reference time (t_ref) required for effect '{effect}'")
 
     # Validate band-specific parameters
+    flux_params = _find_flux_params(parameters)
+    if flux_params and not bands:
+        inferred_bands = _infer_bands_from_flux_params(flux_params)
+        example_bands = inferred_bands or ["0"]
+        messages.append(
+            "Flux parameters were provided but bands is empty. "
+            "Set bands to match your flux terms (Python API: solution.bands = "
+            f"{example_bands!r})."
+        )
+
     if bands:
         required_flux_params = get_required_flux_params(model_type, bands)
         for param in required_flux_params:

@@ -530,6 +562,8 @@ def check_solution_completeness(
 
     # Check for invalid parameters
     invalid_params = set(parameters.keys()) - all_valid_params
+    if flux_params and not bands:
+        invalid_params -= set(flux_params)
     for param in invalid_params:
         messages.append(f"Warning: Parameter '{param}' not recognized for model type " f"'{model_type}'")
 

@@ -856,6 +890,16 @@ def validate_solution_rigorously(
         if not isinstance(band, str):
             messages.append(f"band {i} must be a string, got {type(band).__name__}")
 
+    flux_params = _find_flux_params(parameters)
+    if flux_params and not bands:
+        inferred_bands = _infer_bands_from_flux_params(flux_params)
+        example_bands = inferred_bands or ["0"]
+        messages.append(
+            "Flux parameters were provided but bands is empty. "
+            "Set bands to match your flux terms (Python API: solution.bands = "
+            f"{example_bands!r})."
+        )
+
     # 3. Check if t_ref is provided when not needed
     t_ref_required = False
     for effect in higher_order_effects:

@@ -892,6 +936,8 @@ def validate_solution_rigorously(
     # 5. Check for invalid parameters (unless model_type or effects are "other")
     if model_type != "other" and "other" not in higher_order_effects:
         invalid_params = set(parameters.keys()) - valid_params
+        if flux_params and not bands:
+            invalid_params -= set(flux_params)
         for param in invalid_params:
             messages.append(f"Invalid parameter '{param}' for model type '{model_type}'")
 
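Both validators now emit the same hint whenever band-specific flux parameters (`F<band>_S`, `F<band>_S1/2`, `F<band>_B`) are present but `bands` is empty, and they stop double-reporting those parameters as unknown. A sketch of the situation the check targets (parameter values are illustrative, and `solution` stands for an existing Solution instance):

```python
# Flux terms for band "0" are present, but bands was never declared:
parameters = {"t0": 2459123.5, "u0": 0.1, "F0_S": 1.2, "F0_B": 0.3}
bands = []
# check_solution_completeness(...) / validate_solution_rigorously(...) now append:
#   Flux parameters were provided but bands is empty. Set bands to match your
#   flux terms (Python API: solution.bands = ['0']).
# Declaring the band resolves the warning:
solution.bands = ["0"]  # matches F0_S / F0_B
```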
{microlens_submit-0.16.3.dist-info → microlens_submit-0.16.5.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: microlens-submit
-Version: 0.16.
+Version: 0.16.5
 Summary: A tool for managing and submitting microlensing solutions
 Home-page: https://github.com/AmberLee2427/microlens-submit
 Author: Amber Malpas

@@ -29,6 +29,7 @@ Requires-Dist: typer[all]>=0.9.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: pyyaml>=6.0
 Requires-Dist: markdown>=3.4.0
+Requires-Dist: psutil>=5.9.0
 Requires-Dist: importlib_resources>=1.0.0; python_version < "3.9"
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"

@@ -94,18 +95,26 @@ The CLI is the recommended way to interact with your submission project.
 
 You can pass ``--no-color`` to any command if your terminal does not support ANSI colors.
 
+**Windows note:** If ``microlens-submit`` is not found after ``pip install``, your Python Scripts folder is likely missing from PATH.
+Try ``py -m pip install microlens-submit`` and run ``py -m microlens_submit.cli --help``, or add the Scripts path shown by
+``py -m pip show -f microlens-submit`` to PATH.
+
 1. Initialize your project:
 
    ```bash
-   microlens-submit init --team-name "Planet Pounders" --tier "
-
+   microlens-submit init --team-name "Planet Pounders" --tier "experienced" ./my_submission
+   cd ./my_submission
    ```
 
-
+   If you prefer to initialize inside an existing folder, run `microlens-submit init` without a path after `cd` into it.
+
+   To pass validation, you need to have provided a `repo_url` and `hardware_info` to the project and have a git project initialized in your sumission-project directory. On Roman Nexus, you can use `microlens-submit nexus-init` to auto-populate hardware info.
 
    ```bash
   microlens-submit set-repo-url <url> ./
   microlens-submit set-hardware-info --cpu-details "intel i7 xxx" --ram-gb 32 ./
+   # if your git repo lives elsewhere:
+   microlens-submit set-git-dir /path/to/repo ./
   ```
 2. Add a new solution to an event:
 

@@ -152,7 +161,7 @@ import microlens_submit
 # Load or create the project
 sub = microlens_submit.load(project_path="./my_challenge_submission")
 sub.team_name = "Planet Pounders"
-sub.tier = "
+sub.tier = "experienced"
 
 # Get an event and add a solution
 evt = sub.get_event("ogle-2025-blg-0042")

@@ -201,15 +210,15 @@ import this file directly.
 
 Bibtex:
 ```
-@software{
+@software{malpas_2025_18246117,
   author = {Malpas, Amber},
   title = {microlens-submit},
   month = oct,
   year = 2025,
   publisher = {Zenodo},
   version = {v0.16.3},
-  doi = {10.5281/zenodo.
-  url = {https://doi.org/10.5281/zenodo.
+  doi = {10.5281/zenodo.18246117},
+  url = {https://doi.org/10.5281/zenodo.18246117},
 }
 ```
 
microlens_submit-0.16.5.dist-info/RECORD
ADDED

@@ -0,0 +1,33 @@
+microlens_submit/__init__.py,sha256=fyqDCZ_UClGfQ7zkhWJC8HFJqh2RQDudEXw2LwhLIYs,399
+microlens_submit/error_messages.py,sha256=8Wzx1NQiAF7hUOjlt-uGhtPC7akknv_PDkokoxhWquk,10411
+microlens_submit/text_symbols.py,sha256=PbQSkqF_FwTBs45TkUL0zZl74IYDz7L4xVxy8eKxQsU,3146
+microlens_submit/tier_validation.py,sha256=FbVHQwItqQnrCeKhkQSgQmMT6dgreTGOptRRYf-AYVE,6649
+microlens_submit/utils.py,sha256=0VwCgFOAD4xoNpvY-k15axrb667cXaspzuKrZCGLJ-w,14676
+microlens_submit/validate_parameters.py,sha256=lCbLTeCzjZQG00zqZ5yBfDs1PcAzqxuDoJsAQ8W-aFw,39394
+microlens_submit/assets/github-desktop_logo.png,sha256=pb4rallKrYQPHt6eC0TmJe_UyyMtf1IrP8_OWK19nH8,479821
+microlens_submit/assets/rges-pit_logo.png,sha256=45AJypXCymvt3lMeK7MHt1SBhwPpnKCMj6S000Cejtc,537645
+microlens_submit/cli/__init__.py,sha256=u4ZgVOzUe_gf89FBhY61XWjcfK4oxXCStabYTjBuRRo,82
+microlens_submit/cli/__main__.py,sha256=_7x5Hd_9ehQwYHphd3iPB3LMVvKAVmxz-e0E4pWi0lk,112
+microlens_submit/cli/main.py,sha256=6Q_pxnMFOHDo8iPWefgFqOPkG3-GPGntv6j40hy9kb0,3988
+microlens_submit/cli/commands/__init__.py,sha256=rzIgY7T2Bz4Lhzts_RiWeoBbMoCuxODxeoktnUhH4rw,49
+microlens_submit/cli/commands/dossier.py,sha256=6gRJNzUgr29YmYJRcUj9aoiRhjb1r9Uy4dip6z2LaHI,5100
+microlens_submit/cli/commands/export.py,sha256=ojG2KkOcl90eA0tse6hqy74fHc-YX0o0alzAAbpzJew,8513
+microlens_submit/cli/commands/init.py,sha256=tpO8YlWZLJmo4PuqQTKHXzvniIyWl7WXxUOURp_yfn4,7425
+microlens_submit/cli/commands/solutions.py,sha256=GvbAiiTaCaqcDRHOAReaMY-gDLlahRpYvpnKc2Rajs0,32146
+microlens_submit/cli/commands/validation.py,sha256=1M8mYSNopsA30u3S-yFBd3iJZxsDoNa0vhwo9brJ1VQ,9028
+microlens_submit/dossier/__init__.py,sha256=INAacbrY0Wi5ueH8c7b156bGzelyUFcynbE7_YRiku0,1948
+microlens_submit/dossier/dashboard.py,sha256=4OvTUCxIC4LbAqKwimIFhi65fNo5MMJswiQ5OWtyWFA,19907
+microlens_submit/dossier/event_page.py,sha256=7740o3tpW9Urv7GSzYdp2TiphvDi6U7XnjlLZYipvLw,14878
+microlens_submit/dossier/full_report.py,sha256=cNkaUML93oRnARp4VnLkbRMudrByWyj06ohvmH4Qcq0,14310
+microlens_submit/dossier/solution_page.py,sha256=-5kgkOZ9ziNRNAFpVeT_9-6aCcQRL4vv9APopKDFeAw,24748
+microlens_submit/dossier/utils.py,sha256=-DbWByBMsEeQZ-eUyRT74O_3lakE1vHKUD62jPj1_t4,5839
+microlens_submit/models/__init__.py,sha256=1sHFjAWyFtGgQBRSo8lBYiPzToo4tIoHP3uBjtgJSPY,861
+microlens_submit/models/event.py,sha256=ifQqE7d7PJTTI9lGylwWV3EGxgyyNGiJtHbm_DLmuys,17105
+microlens_submit/models/solution.py,sha256=ollTpKv8zMSEqIL2Q9gXJTbaX0fWZt6rg76edBmYOWQ,23629
+microlens_submit/models/submission.py,sha256=f_ewUFhXghnh-pn077bkfBg_6jVbcN_jRhy2wVdKUgk,27941
+microlens_submit-0.16.5.dist-info/licenses/LICENSE,sha256=cy1qkVR-kGxD6FXVsparmU2vHJXYeoyAAHv6SgT67sw,1069
+microlens_submit-0.16.5.dist-info/METADATA,sha256=4mSk_0rksMNxU7NOl2dEA7J73Zvb1SwJ-DKGW00WPkM,10673
+microlens_submit-0.16.5.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+microlens_submit-0.16.5.dist-info/entry_points.txt,sha256=kA85yhxYrpQnUvVZCRS2giz52gaf1ZOfZFjY4RHIZ2s,62
+microlens_submit-0.16.5.dist-info/top_level.txt,sha256=uJ9_bADYRySlhEpP-8vTm90ZLV2SrKEzutAaRx8WF0k,17
+microlens_submit-0.16.5.dist-info/RECORD,,

microlens_submit-0.16.3.dist-info/RECORD
DELETED

@@ -1,33 +0,0 @@
-microlens_submit/__init__.py,sha256=sOcJ78On7mNKtZnfEtYhLM3bm8heNCmpr7NxHWG218A,399
-microlens_submit/error_messages.py,sha256=x_x0ZE-y7XSxbqmX4G6xR2EjVOAsldLILGalmJZtLO8,10363
-microlens_submit/text_symbols.py,sha256=PbQSkqF_FwTBs45TkUL0zZl74IYDz7L4xVxy8eKxQsU,3146
-microlens_submit/tier_validation.py,sha256=mbE9MjXRHbsPBB12aD2lyYUdvXiBacweRG2OTWgP7Fs,6168
-microlens_submit/utils.py,sha256=3kEinJXr5OWHjrQUGHhUFA6L2rNdqaLy2ogmQNx_TfY,14358
-microlens_submit/validate_parameters.py,sha256=_RBzdbY2sFwtNNd5f69bwxTpHjlNq_np3iDYvio4Oow,37645
-microlens_submit/assets/github-desktop_logo.png,sha256=pb4rallKrYQPHt6eC0TmJe_UyyMtf1IrP8_OWK19nH8,479821
-microlens_submit/assets/rges-pit_logo.png,sha256=45AJypXCymvt3lMeK7MHt1SBhwPpnKCMj6S000Cejtc,537645
-microlens_submit/cli/__init__.py,sha256=u4ZgVOzUe_gf89FBhY61XWjcfK4oxXCStabYTjBuRRo,82
-microlens_submit/cli/__main__.py,sha256=_7x5Hd_9ehQwYHphd3iPB3LMVvKAVmxz-e0E4pWi0lk,112
-microlens_submit/cli/main.py,sha256=iJ2bkQZ-OiuCUCVqs_EGbhpa240zB7agrYNZj6p5_3g,3938
-microlens_submit/cli/commands/__init__.py,sha256=rzIgY7T2Bz4Lhzts_RiWeoBbMoCuxODxeoktnUhH4rw,49
-microlens_submit/cli/commands/dossier.py,sha256=6gRJNzUgr29YmYJRcUj9aoiRhjb1r9Uy4dip6z2LaHI,5100
-microlens_submit/cli/commands/export.py,sha256=umtziho0Rv7IRhcVU5NnuUajdaa0SgatZbRmBw8OJ8o,6751
-microlens_submit/cli/commands/init.py,sha256=sarZXDLcWO5JgXbgb5RwllV0JrMMjS3t3geq9RmhVlE,7275
-microlens_submit/cli/commands/solutions.py,sha256=26G6gMLvxFJJYMhswgTSR_iF1xDR7e_9P-_MesV9Jco,30350
-microlens_submit/cli/commands/validation.py,sha256=1M8mYSNopsA30u3S-yFBd3iJZxsDoNa0vhwo9brJ1VQ,9028
-microlens_submit/dossier/__init__.py,sha256=INAacbrY0Wi5ueH8c7b156bGzelyUFcynbE7_YRiku0,1948
-microlens_submit/dossier/dashboard.py,sha256=4OvTUCxIC4LbAqKwimIFhi65fNo5MMJswiQ5OWtyWFA,19907
-microlens_submit/dossier/event_page.py,sha256=F9waw-Ce2_4ikdCPo-hNURUSYEPGMCfvsY3PbCXpsFg,14425
-microlens_submit/dossier/full_report.py,sha256=zQXoo6ZQfwv_NNFFel3ZYW1DgnqD--VU0L7J7p9yEng,12864
-microlens_submit/dossier/solution_page.py,sha256=qp2JaDamHD__3bwOzZ3CRj2UUSCiOf94wNyOfkQUGGU,23593
-microlens_submit/dossier/utils.py,sha256=LopBbVg6nzQasL1lnaI63y3bpmqYqBeDEwfB_NqEeCA,3845
-microlens_submit/models/__init__.py,sha256=1sHFjAWyFtGgQBRSo8lBYiPzToo4tIoHP3uBjtgJSPY,861
-microlens_submit/models/event.py,sha256=ifQqE7d7PJTTI9lGylwWV3EGxgyyNGiJtHbm_DLmuys,17105
-microlens_submit/models/solution.py,sha256=0prKFYM3oxKXZjtzhaAaS7E5cYP2maxfbYNQhkL-IY0,23339
-microlens_submit/models/submission.py,sha256=54ssVomKh8W_nYBeuRAPexko0d_4FwcWKEl9q5Vu1as,26458
-microlens_submit-0.16.3.dist-info/licenses/LICENSE,sha256=cy1qkVR-kGxD6FXVsparmU2vHJXYeoyAAHv6SgT67sw,1069
-microlens_submit-0.16.3.dist-info/METADATA,sha256=5zBGMg8sUpfhJIgrhAMMk8xe-Rwe6fdu8olhY24DWgQ,10084
-microlens_submit-0.16.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-microlens_submit-0.16.3.dist-info/entry_points.txt,sha256=kA85yhxYrpQnUvVZCRS2giz52gaf1ZOfZFjY4RHIZ2s,62
-microlens_submit-0.16.3.dist-info/top_level.txt,sha256=uJ9_bADYRySlhEpP-8vTm90ZLV2SrKEzutAaRx8WF0k,17
-microlens_submit-0.16.3.dist-info/RECORD,,

{microlens_submit-0.16.3.dist-info → microlens_submit-0.16.5.dist-info}/entry_points.txt: File without changes
{microlens_submit-0.16.3.dist-info → microlens_submit-0.16.5.dist-info}/licenses/LICENSE: File without changes
{microlens_submit-0.16.3.dist-info → microlens_submit-0.16.5.dist-info}/top_level.txt: File without changes