so-campaign-manager 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- so_campaign_manager-0.0.4.dist-info/METADATA +179 -0
- so_campaign_manager-0.0.4.dist-info/RECORD +44 -0
- so_campaign_manager-0.0.4.dist-info/WHEEL +5 -0
- so_campaign_manager-0.0.4.dist-info/entry_points.txt +2 -0
- so_campaign_manager-0.0.4.dist-info/licenses/LICENSE +24 -0
- so_campaign_manager-0.0.4.dist-info/top_level.txt +1 -0
- socm/__about__.py +34 -0
- socm/__init__.py +0 -0
- socm/__main__.py +35 -0
- socm/bookkeeper/__init__.py +1 -0
- socm/bookkeeper/bookkeeper.py +488 -0
- socm/configs/slurmise.toml +2 -0
- socm/core/__init__.py +1 -0
- socm/core/models.py +235 -0
- socm/enactor/__init__.py +3 -0
- socm/enactor/base.py +123 -0
- socm/enactor/dryrun_enactor.py +216 -0
- socm/enactor/rp_enactor.py +273 -0
- socm/execs/__init__.py +3 -0
- socm/execs/mapmaking.py +73 -0
- socm/planner/__init__.py +2 -0
- socm/planner/base.py +87 -0
- socm/planner/heft_planner.py +442 -0
- socm/resources/__init__.py +5 -0
- socm/resources/perlmutter.py +22 -0
- socm/resources/tiger.py +24 -0
- socm/resources/universe.py +18 -0
- socm/utils/__init__.py +0 -0
- socm/utils/misc.py +90 -0
- socm/utils/states.py +17 -0
- socm/workflows/__init__.py +41 -0
- socm/workflows/ml_mapmaking.py +111 -0
- socm/workflows/ml_null_tests/__init__.py +10 -0
- socm/workflows/ml_null_tests/base.py +117 -0
- socm/workflows/ml_null_tests/day_night_null_test.py +132 -0
- socm/workflows/ml_null_tests/direction_null_test.py +133 -0
- socm/workflows/ml_null_tests/elevation_null_test.py +118 -0
- socm/workflows/ml_null_tests/moon_close_null_test.py +165 -0
- socm/workflows/ml_null_tests/moonrise_set_null_test.py +151 -0
- socm/workflows/ml_null_tests/pwv_null_test.py +118 -0
- socm/workflows/ml_null_tests/sun_close_null_test.py +173 -0
- socm/workflows/ml_null_tests/time_null_test.py +76 -0
- socm/workflows/ml_null_tests/wafer_null_test.py +175 -0
- socm/workflows/sat_simulation.py +76 -0
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
from datetime import timedelta
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import Dict, List, Optional, Union
|
|
4
|
+
|
|
5
|
+
import numpy as np
|
|
6
|
+
from pydantic import PrivateAttr
|
|
7
|
+
from sotodlib.core import Context
|
|
8
|
+
|
|
9
|
+
from socm.workflows.ml_null_tests import NullTestWorkflow
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class WaferNullTestWorkflow(NullTestWorkflow):
    """
    A workflow for wafer null tests.

    For every known ``tube_slot:wafer_slot`` combination of the configured
    site, the matching observations are split into ``nsplits`` interleaved
    groups, and one ``NullTestWorkflow`` is created per non-empty split.
    """

    # Exactly one of chunk_nobs / chunk_duration must be set (enforced in
    # _get_splits); splitting by duration is not implemented yet.
    chunk_nobs: Optional[int] = None
    chunk_duration: Optional[timedelta] = None
    nsplits: int = 8
    name: str = "wafer_null_test_workflow"

    # Known "tube_slot:wafer_slot" combinations per telescope/site key.
    # NOTE(review): assumes `self.site` (defined on the base class) is one of
    # these keys — confirm against NullTestWorkflow.
    _wafer_list_per_telescope: Dict[str, List[str]] = PrivateAttr(
        {
            "sat": [
                "st1:ws0",
                "st1:ws1",
                "st1:ws2",
                "st1:ws3",
                "st1:ws4",
                "st1:ws5",
                "st1:ws6",
            ],
            "act": ["st1:ws0"],
            "lat": [
                "c1:ws0",
                "c1:ws1",
                "c1:ws2",
                "i1:ws0",
                "i1:ws1",
                "i1:ws2",
                "i2:ws0",
                "i2:ws1",
                "i2:ws2",
                "i3:ws0",
                "i3:ws1",
                "i3:ws2",
                "i4:ws0",
                "i4:ws1",
                "i4:ws2",
                "i5:ws0",
                "i5:ws1",
                "i5:ws2",
                "i6:ws0",
                "i6:ws1",
                "i6:ws2",
            ],
            "so_sat": [
                "st1:ws0",
                "st1:ws1",
                "st1:ws2",
                "st1:ws3",
                "st1:ws4",
                "st1:ws5",
                "st1:ws6",
            ],
            "so_lat": [
                "c1:ws0",
                "c1:ws1",
                "c1:ws2",
                "i1:ws0",
                "i1:ws1",
                "i1:ws2",
                "i2:ws0",
                "i2:ws1",
                "i2:ws2",
                "i3:ws0",
                "i3:ws1",
                "i3:ws2",
                "i4:ws0",
                "i4:ws1",
                "i4:ws2",
                "i5:ws0",
                "i5:ws1",
                "i5:ws2",
                "i6:ws0",
                "i6:ws1",
                "i6:ws2",
            ],
        }
    )

    def _get_splits(
        self, ctx: Context, obs_info: Dict[str, Dict[str, Union[float, str]]]
    ) -> Dict[str, List[str]]:
        """
        Distribute the observations across splits, per wafer.

        Parameters
        ----------
        ctx : Context
            sotodlib context (unused here; kept for interface compatibility).
        obs_info : dict
            Maps observation id to its metadata. Each entry is expected to
            provide "tube_slot", "wafer_list" and "start_time" keys.

        Returns
        -------
        dict
            Maps "tube_slot:wafer_slot" to a list of ``nsplits`` lists of
            observation ids (time-ordered, round-robin assigned).

        Raises
        ------
        ValueError
            If neither or both of chunk_nobs/chunk_duration are set, or if
            the observations span more than one tube slot.
        NotImplementedError
            If splitting by duration is requested.
        """
        if self.chunk_nobs is None and self.chunk_duration is None:
            raise ValueError("Either chunk_nobs or duration must be set.")
        elif self.chunk_nobs is not None and self.chunk_duration is not None:
            raise ValueError("Only one of chunk_nobs or duration can be set.")
        elif self.chunk_nobs is None:
            # Decide the chunk size based on the duration. Each chunk needs to have the
            # observataions that their start times are just less than chunk_duration.
            raise NotImplementedError(
                "Splitting by duration is not implemented yet. Please set chunk_nobs."
            )

        tube_slots = {v["tube_slot"] for v in obs_info.values()}
        if len(tube_slots) > 1:
            raise ValueError(
                f"All observations must be from the same tube slot. Found: {tube_slots}"
            )
        final_splits = {}
        for tube_wafer in self._wafer_list_per_telescope[self.site]:
            tube_slot, wafer = tube_wafer.split(":")
            # Keep only observations that include this wafer in this tube slot.
            # NOTE(review): `tube_slot in v["tube_slot"]` is a substring test —
            # presumably intentional, verify upstream metadata format.
            wafer_obs_info = {
                k: v
                for k, v in obs_info.items()
                if v["wafer_list"] is not None
                and v["tube_slot"] is not None
                and wafer in v["wafer_list"]
                and tube_slot in v["tube_slot"]
            }
            if not wafer_obs_info:
                continue

            # Time-order the observation ids before chunking.
            sorted_ids = sorted(
                wafer_obs_info, key=lambda k: wafer_obs_info[k]["start_time"]
            )

            num_chunks = self._get_num_chunks(len(sorted_ids))
            obs_lists = np.array_split(sorted_ids, num_chunks) if num_chunks > 0 else []
            # Round-robin the chunks over the requested number of splits.
            splits = [[] for _ in range(self.nsplits)]
            for i, obs_list in enumerate(obs_lists):
                splits[i % self.nsplits] += obs_list.tolist()
            final_splits[tube_wafer] = splits

        return final_splits

    @classmethod
    def get_workflows(cls, desc=None) -> List[NullTestWorkflow]:
        """
        Create a list of NullTestWorkflow instances from the provided description.

        Parameters
        ----------
        desc : dict, optional
            Keyword arguments used to construct the parent wafer workflow.
            Defaults to an empty description.

        Returns
        -------
        list of NullTestWorkflow
            One workflow per non-empty wafer split. As a side effect, each
            split's observation ids are written to a ``query.txt`` file under
            the split's output directory.
        """
        # Fix: the documented default (None) used to crash in cls(**desc).
        wafer_workflow = cls(**(desc or {}))

        workflows = []
        for tube_wafer, wafer_split in wafer_workflow._splits.items():
            _, wafer = tube_wafer.split(":")
            for idx, split in enumerate(wafer_split):
                if not split:
                    continue
                # Use a fresh local instead of shadowing the `desc` parameter.
                wf_desc = wafer_workflow.model_dump(exclude_unset=True)
                wf_desc["name"] = f"wafer_{wafer}_split_{idx + 1}_null_test_workflow"
                wf_desc["wafer"] = wafer
                wf_desc["output_dir"] = (
                    f"{wafer_workflow.output_dir}/wafer_{wafer}_split_{idx + 1}"
                )
                wf_desc["datasize"] = 0
                # Persist the split's observation ids so the child workflow can
                # query them back via a file:// URI.
                query_file = Path(wf_desc["output_dir"]) / "query.txt"
                query_file.parent.mkdir(parents=True, exist_ok=True)
                with open(query_file, "w") as f:
                    for oid in split:
                        f.write(f"{oid}\n")
                wf_desc["query"] = f"file://{str(query_file.absolute())}"
                wf_desc["chunk_nobs"] = 1
                workflow = NullTestWorkflow(**wf_desc)
                workflows.append(workflow)

        return workflows
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
from typing import Any, Dict, Optional
|
|
3
|
+
|
|
4
|
+
from pydantic import PrivateAttr
|
|
5
|
+
|
|
6
|
+
from ..core.models import Workflow
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class SATSimWorkflow(Workflow):
    """
    A workflow for simulating SAT observations with ``toast_so_sim``.
    """

    output_dir: str
    name: str = "sat_sims"
    executable: str = "toast_so_sim"
    schedule: Optional[str] = None
    bands: Optional[str] = "SAT_f090"
    wafer_slots: Optional[str] = "w25"
    sample_rate: int = 37
    sim_noise: bool = False
    scan_map: bool = False
    sim_atmosphere: bool = False
    sim_sss: bool = False
    sim_hwpss: bool = False
    sim_hwpss_atmo_data: Optional[str] = None
    pixels_healpix_radec_nside: int = 512
    filterbin_name: Optional[str] = None
    processing_mask_file: Optional[str] = None

    # Field-name -> dotted toast CLI option for operator sub-traits.
    _arg_translation: Dict[str, str] = PrivateAttr(
        {
            "sim_hwpss_atmo_data": "sim_hwpss.atmo_data",
            "pixels_healpix_radec_nside": "pixels_healpix_radec.nside",
            "filterbin_name": "filterbin.name",
            "processing_mask_file": "processing_mask.file",
        }
    )

    # Fields that are launcher/bookkeeping metadata, never CLI arguments.
    _EXCLUDED_ARGS = frozenset(
        {"name", "output_dir", "executable", "id", "environment", "resources"}
    )

    def get_command(self, **kargs: Any) -> str:
        """
        Build the full srun command line for the SAT simulation.

        Returns
        -------
        str
            ``srun ... <executable> <subcommand> ...`` plus the arguments
            from :meth:`get_arguments`.

        Raises
        ------
        ValueError
            If ``self.resources`` has not been set.
        """
        # NOTE(review): `resources` and `subcommand` come from the Workflow
        # base class — confirm their shape ("ranks"/"threads" keys) there.
        if self.resources is None:
            raise ValueError("Resources must be set before calling get_command")
        command = f"srun --cpu_bind=cores --export=ALL --ntasks-per-node={self.resources['ranks']} --cpus-per-task={self.resources['threads']} {self.executable} {self.subcommand} --job_group_size={self.resources['ranks']} "
        command += self.get_arguments()

        return command.strip()

    def get_arguments(self, **kargs: Any) -> str:
        """
        Build the ``toast_so_sim`` argument string from the model fields.

        Booleans become ``--<field>.enable`` / ``--<field>.disable``; other
        fields become ``--<option>=<value>`` using ``_arg_translation`` for
        dotted operator options. Unset (``None``) fields are omitted.
        """
        arguments = f"--out {self.output_dir} "

        # Sort for a deterministic, reproducible command line.
        for key, value in sorted(self.model_dump().items()):
            if key in self._EXCLUDED_ARGS:
                continue
            # Fix: previously None-valued optionals were emitted as the
            # literal string "--opt=None"; skip them instead.
            if value is None:
                continue
            # Resolve file:// URIs to absolute local paths.
            if isinstance(value, str) and value.startswith("file://"):
                value = Path(value.split("file://")[-1]).absolute()
            if isinstance(value, bool):
                arguments += f"--{key}.enable " if value else f"--{key}.disable "
            else:
                arguments += f"--{self._arg_translation.get(key, key)}={value} "
        return arguments.strip()
|