sodetlib 0.6.1rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sodetlib/__init__.py +22 -0
- sodetlib/_version.py +21 -0
- sodetlib/constants.py +13 -0
- sodetlib/det_config.py +709 -0
- sodetlib/noise.py +624 -0
- sodetlib/operations/__init__.py +5 -0
- sodetlib/operations/bias_dets.py +551 -0
- sodetlib/operations/bias_steps.py +1248 -0
- sodetlib/operations/bias_wave.py +688 -0
- sodetlib/operations/complex_impedance.py +651 -0
- sodetlib/operations/iv.py +716 -0
- sodetlib/operations/optimize.py +189 -0
- sodetlib/operations/squid_curves.py +641 -0
- sodetlib/operations/tracking.py +624 -0
- sodetlib/operations/uxm_relock.py +406 -0
- sodetlib/operations/uxm_setup.py +783 -0
- sodetlib/py.typed +0 -0
- sodetlib/quality_control.py +415 -0
- sodetlib/resonator_fitting.py +508 -0
- sodetlib/stream.py +291 -0
- sodetlib/tes_param_correction.py +579 -0
- sodetlib/util.py +880 -0
- sodetlib-0.6.1rc1.data/scripts/jackhammer +761 -0
- sodetlib-0.6.1rc1.dist-info/LICENSE +25 -0
- sodetlib-0.6.1rc1.dist-info/METADATA +6 -0
- sodetlib-0.6.1rc1.dist-info/RECORD +28 -0
- sodetlib-0.6.1rc1.dist-info/WHEEL +5 -0
- sodetlib-0.6.1rc1.dist-info/top_level.txt +1 -0
sodetlib-0.6.1rc1.data/scripts/jackhammer (new file):

@@ -0,0 +1,761 @@
#!python

import argparse
import subprocess
import shlex
import yaml
import sys
import time
import os
import threading
from typing import List, Literal


class TermColors:
    HEADER = '\n\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


def cprint(msg, style=TermColors.OKBLUE):
    if style == True:
        style = TermColors.OKGREEN
    elif style == False:
        style = TermColors.FAIL
    print(style + str(msg) + TermColors.ENDC)


# This should be the same for every smurf-srv
if 'SMURF_CONFIG_DIR' in os.environ:
    cfg_dir = os.environ['SMURF_CONFIG_DIR']
elif 'OCS_CONFIG_DIR' in os.environ:
    cfg_dir = os.environ['OCS_CONFIG_DIR']
    cprint("SMURF_CONFIG_DIR not found in environ...", style=TermColors.WARNING)
    cprint(f"Using OCS_CONFIG_DIR instead: {cfg_dir}...", style=TermColors.WARNING)
else:
    raise ValueError(
        "SMURF_CONFIG_DIR or OCS_CONFIG_DIR must be set in the environment"
    )

cwd = cfg_dir
sys_config_file = os.path.join(cfg_dir, 'sys_config.yml')

with open(sys_config_file, 'r') as stream:
    sys_config = yaml.safe_load(stream)

use_hostmanager: bool = sys_config.get('use_hostmanager', False)
docker_compose_cmd: str = sys_config.get('docker_compose_command', 'docker compose')
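# Note (added for clarity; not part of the original script): the sys_config.yml
# keys read by this script, gathered from the calls below. The example values
# are hypothetical and site-specific.
#
#   use_hostmanager: false           # leave controller/sync dockers to OCS HostManager
#   docker_compose_command: docker compose
#   slot_order: [2, 3, 4, 5, 6, 7]   # slots available on this system
#   crate_id: 1                      # used for carrier IPs when switch_ip is absent
#   switch_ip: 192.168.1.1           # optional; overrides the 10.0.<crate_id>.* scheme
#   shelf_manager: shelf-manager     # host that clia commands are ssh'd to
#   docker_env: {...}                # written to $SMURF_CONFIG_DIR/.env by write_docker_env()
#   slots:
#     SLOT[2]:
#       pysmurf_controller_docker_service: ocs-pysmurf-s2   # optional override
#   smurf_fans / min_fan_level / init_fan_level / max_fan_level: see setup_fans()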

def get_pysmurf_controller_docker_service(slot: int) -> str:
    """
    Returns the pysmurf-controller docker service name to use for a given slot.
    Defaults to "ocs-pysmurf-s<slot>" if not specified in the sys_config.
    """
    slot_cfg = sys_config['slots'][f'SLOT[{slot}]']
    return slot_cfg.get("pysmurf_controller_docker_service", f"ocs-pysmurf-s{slot}")


def get_slot_ip(slot):
    if 'switch_ip' not in sys_config:
        return f"10.0.{sys_config['crate_id']}.{slot + 100}"
    ip_list = sys_config['switch_ip'].split('.')
    res = '.'.join(ip_list[:-1] + [str(slot + 100)])
    print(res)
    return res
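# Example (illustrative, values hypothetical): with no 'switch_ip' in sys_config
# and crate_id = 1, get_slot_ip(2) returns "10.0.1.102"; with
# switch_ip = "192.168.3.1" it returns "192.168.3.102". The last octet is
# always slot + 100.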

########################################################################
# Various utility functions
########################################################################
def get_docker_services():
    """
    Returns a list of docker services that are available from the docker-compose
    file in the ocs config directory.
    """
    cmd = 'docker compose config --services'
    res = subprocess.run(shlex.split(cmd), cwd=cwd, stdout=subprocess.PIPE)
    return res.stdout.decode().split()


def util_run(cmd, args=[], name=None, rm=True, **run_kwargs):
    """
    Runs a command using subprocess.run within the sodetlib util docker.

    Args
    ------
    cmd : string
        Command to run
    args : List[string]
        List of arguments to pass to the command
    name : string, optional
        Name of the docker container. If none is specified this will not be
        set and docker-compose will choose the default.
    rm : bool
        If True, will remove the container when the command has finished.
    run_kwargs : Additional keyword arguments
        Any additional kwargs specified will be passed directly to the
        subprocess.run function. See the subprocess docs for allowed kwargs:
        https://docs.python.org/3/library/subprocess.html#subprocess.run
    """
    cmd = f'{docker_compose_cmd} run --entrypoint={cmd} '
    if name is not None:
        cmd += f'--name={name} '
    if rm:
        cmd += '--rm '
    cmd += f'smurf-util {" ".join(args)}'
    return subprocess.run(shlex.split(cmd), cwd=cwd, **run_kwargs)
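# Example (illustrative): check_epics_connection() below uses util_run to run
# caget inside the smurf-util container and capture its output:
#
#   x = util_run('caget', args=['smurf_server_s2:AMCc:enable'],
#                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#   print(x.stdout.decode())
#
# The slot-2 server name is only an example value.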

def check_epics_connection(epics_server, retry=False):
    """
    Checks if we can connect to a specific epics server.

    Args:
        epics_server (string):
            epics server to connect to
        retry (bool):
            If true, will continuously check until a connection has been
            established.
    """
    if retry:
        print(f"Waiting for epics connection to {epics_server}", end='', flush=True)
        while True:
            x = util_run(
                'caget', args=[f'{epics_server}:AMCc:enable'],
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            )
            if "True" in x.stdout.decode():
                break
            print('.', end='', flush=True)

        print("\nConnected!")
        return True
    else:
        x = util_run(
            'caget', args=[f'{epics_server}:AMCc:enable'],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        return "True" in x.stdout.decode()


def get_running_dockers(get_all=True):
    """
    Gets all currently running dockers.

    Returns:
        A list of tuples of the (cid, image, name)
    """
    cmd = 'docker ps '
    if get_all:
        cmd += '-a '
    x = subprocess.run(cmd.split(), stdout=subprocess.PIPE)
    containers = []
    for line in x.stdout.decode().split('\n')[1:]:
        if not line:
            continue

        cid, image, *_, cname = line.split()
        containers.append((cid, image, cname))

    return containers
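# Example (illustrative, made-up container ID): a `docker ps` row such as
#   3f2a1bc4d5e6  tidair/pysmurf-server:R0.0.0  ...  smurf_server_s2
# is parsed into ('3f2a1bc4d5e6', 'tidair/pysmurf-server:R0.0.0', 'smurf_server_s2').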

def kill_bad_dockers(slots, kill_monitor=False, names=[], images=[]):
    """
    Kills relevant dockers for a given set of slots.

    Args:
        slots (list of ints):
            list of slots which we should kill related dockers
        kill_monitor (bool):
            If true, will also kill the pysmurf_monitor
        names (list of strings):
            List of additional docker names we should kill
        images (list of strings):
            List of additional docker images we should kill
    """

    print(f"Killing bad dockers for slots {slots}")
    bad_names = names

    for slot in slots:
        # conflicting names created by jackhammer
        bad_names.append(f'smurf-streamer-s{slot}')
        bad_names.append(f'pysmurf-client-s{slot}')

        # conflicting names created by shawnhammer
        bad_names.append(f"pysmurf_s{slot}")
        bad_names.append(f"smurf_server_s{slot}")
        bad_names.append(f"pysmurf-ipython-slot{slot}")

        if not use_hostmanager:
            bad_names.append(f'ocs-pysmurf-s{slot}')
            bad_names.append(get_pysmurf_controller_docker_service(slot))

    if kill_monitor:
        bad_names.append('ocs-pysmurf-monitor')

    bad_images = images

    containers = get_running_dockers()

    for cid, full_image, name in containers:
        # This will usually get the real image name.
        # We want 'pysmurf-server' from 'tidair/pysmurf-server:R0.0.0'
        image = full_image.split('/')[-1].split(':')[0]

        if (name in bad_names) or (image in bad_images):
            print(f"Stopping docker {name} ({image})")
            subprocess.run(f'docker stop {cid}'.split())
            subprocess.run(f'docker rm {cid}'.split())


def dump_docker_logs(slots, dump_rogue_tree=False):
    """
    Dumps all docker logs and the rogue state for the specified
    slots to text files.

    Args:
        slots (list of ints):
            list of slots which we should dump the rogue state of.
        dump_rogue_tree (bool):
            If true, will also dump the rogue state for each slot.
    """
    dump_dir = os.path.join(
        '/data/logs',
        str(time.time())[:5],
        str(int(time.time()))
    )

    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)

    cprint(f"Dumping docker logs to {dump_dir}", style=TermColors.HEADER)
    docker_state_file = os.path.join(dump_dir, 'docker_state.log')
    with open(docker_state_file, 'w') as f:
        print(f"Saving 'docker ps' to {docker_state_file}")
        subprocess.run('docker ps -a'.split(), stdout=f, stderr=f)

    for cid, image, name in get_running_dockers(get_all=True):
        log_file = os.path.join(dump_dir, f'{name}.log')
        with open(log_file, 'w') as f:
            print(f"Saving {name} logs to {log_file}")
            subprocess.run(f'docker logs {cid}'.split(), stdout=f, stderr=f)

    if dump_rogue_tree:
        dump_script = '/sodetlib/scripts/dump_rogue_state.py'
        for slot in slots:
            if check_epics_connection(f'smurf_server_s{slot}', retry=False):
                out_file = os.path.join(dump_dir, f'rogue_state_s{slot}.yml')
                cprint(f"Dumping s{slot} state to {out_file}", style=TermColors.HEADER)
                util_run('python3', args=[dump_script, str(slot), out_file],
                         name=f'rogue_dump_s{slot}')
            else:
                print(f"Could not connect to epics for slot {slot}")


def run_on_shelf_manager(cmd_str):
    """Runs a command on the shelf manager. Takes in the command as a string."""
    cmd = ['ssh', f'root@{sys_config["shelf_manager"]}', f'{cmd_str}']
    print(cmd)
    subprocess.run(cmd)


def enter_pysmurf(slot, agg=False):
    """
    Enters into a pysmurf ipython session.

    Args:
        slot (int): slot to enter into
        agg (bool): if true, use the matplotlib agg backend.
    """
    print(f"Entering pysmurf on slot {slot}", flush=True)
    name = f'pysmurf-ipython-slot{slot}'
    container_names = [c[2] for c in get_running_dockers()]
    active_container_names = [c[2] for c in get_running_dockers(get_all=False)]

    if name in active_container_names:
        print(f"Container {name} is already running... Attaching to it now")
        cmd = shlex.split(f'docker attach {name}')
        subprocess.run(cmd, cwd=cwd)
    else:
        if name in container_names:
            print(f"Removing stopped docker {name}")
            subprocess.run(f'docker rm {name}'.split())

        util_run(
            'python3',
            args=f"/sodetlib/scripts/start_pysmurf_ipython.py -N {slot}".split(),
            name=name, rm=False
        )


def write_docker_env():
    docker_env = sys_config.get('docker_env')
    with open(os.path.join(cfg_dir, ".env"), "w") as env_file:
        for k, v in docker_env.items():
            env_file.write(f'{k}={v}\n')


def start_services(services, write_env=False):
    """
    Starts docker-compose services

    Args:
        services (list or str): docker-compose services to restart
        write_env (bool):
            If true, will write sys_config['docker_env'] to the
            $SMURF_CONFIG_DIR/.env file. This is so the most recent
            environment variables can be used by standard docker-compose
            commands.
    """
    docker_env = sys_config.get('docker_env')

    # Writes docker_env to $SMURF_CONFIG_DIR/.env so docker-compose still works
    if write_env and docker_env is not None:
        write_docker_env()

    if isinstance(services, str):
        services = [services]

    cmd = f'{docker_compose_cmd} up -d'.split()
    cmd.extend(services)

    subprocess.run(cmd, cwd=cwd)


def controller_cmd(
    slots: List[int],
    action: Literal['up', 'down', 'logs']
) -> None:
    """
    Brings pysmurf-controllers up or down (or tails their logs) for specified slots.
    """
    if use_hostmanager:
        cprint(
            "sys_config['use_hostmanager'] is set to True, leaving controller dockers alone.",
            style=TermColors.WARNING
        )
        return

    print(f"Bringing controllers {action} for slots {slots}...")
    services = []
    for slot in slots:
        services.append(get_pysmurf_controller_docker_service(slot))

    if action == 'up':
        cmd = f'{docker_compose_cmd} up -d'.split()
    elif action == 'down':
        cmd = f'{docker_compose_cmd} stop'.split()
    elif action == 'logs':
        cmd = 'docker logs -f'.split()
    else:
        raise ValueError(f"action {action} not recognized. Must be 'up', 'down', or 'logs'.")

    cmd.extend(services)
    subprocess.run(cmd, cwd=cwd)


def controller_cmd_func(args) -> None:
    """
    Entrypoint for the `jackhammer controller` command
    """
    available_slots = sys_config['slot_order']
    if args.slots is None or not args.slots:
        slots = available_slots
    else:
        for slot in args.slots:
            if slot not in available_slots:
                raise ValueError(
                    f"Slot {slot} is not listed in the available slots: "
                    f"{available_slots}."
                )
        slots = args.slots
    controller_cmd(slots, args.action)


########################################################################
# jackhammer subcommand entrypoints
########################################################################

# Entrypoint for jackhammer pysmurf
def pysmurf_func(args):
    available_slots = sys_config['slot_order']

    if args.slot is not None:
        if args.slot not in available_slots:
            print(f"Slot {args.slot} is not listed in the available smurf_slots in {sys_config_file}")
            raise ValueError
        slot = args.slot
    else:
        slot = available_slots[0]

    enter_pysmurf(slot, agg=args.agg)


# Entrypoint for jackhammer hammer
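# Overview of the hammer sequence implemented below (comment added for
# readability; the function body is authoritative):
#   1. Validate the requested slots and ask for confirmation.
#   2. Dump docker logs for debugging (unless --no-dump).
#   3. Stop and remove conflicting containers via kill_bad_dockers().
#   4. Restart smurf-util (when every slot is being hammered) and set the
#      crate fan levels via setup_fans().
#   5. Unless --no-reboot/--soft: deactivate and re-activate the carriers
#      through the shelf manager and wait for them to answer pings.
#   6. Bring up the streamer/util/jupyter/sync/controller dockers and wait
#      for each slot's EPICS server to respond.
#   7. On a reboot (unless --skip-setup), run setup_smurfs(), then drop into
#      a pysmurf ipython session on the first slot.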
def hammer_func(args):
    # here we go....
    if not args.slots:
        slots = sys_config['slot_order']
    else:
        slots = args.slots
        for s in slots:
            if s not in sys_config['slot_order']:
                raise ValueError(
                    f"Slot {s} is not valid for this system! Can only use "
                    f"slots in: {sys_config['slot_order']}")

    reboot = not (args.no_reboot)
    all_slots = len(slots) == len(sys_config['slot_order'])

    reboot_str = "hard" if reboot else "soft"
    cmd = input(f"You are {reboot_str}-resetting slots {slots}. "
                "Are you sure (y/n)? ")
    if cmd.lower() not in ["y", "yes"]:
        return

    # dump docker logs for debugging.
    if not args.no_dump:
        dump_docker_logs(slots)

    cprint(f"Hammering for slots {slots}", True)

    cprint("Killing conflicting dockers", style=TermColors.HEADER)
    kill_bad_dockers(slots, kill_monitor=False)

    if all_slots:
        cprint("Restarting smurf-util", style=TermColors.HEADER)
        # Restarts smurf-util to clean up any running processes
        subprocess.run('docker stop smurf-util'.split(), cwd=cwd)
        subprocess.run('docker rm smurf-util'.split(), cwd=cwd)
        start_services('smurf-util', write_env=True)

    # Sets fan levels on crate
    cprint("Setting fan levels", style=TermColors.HEADER)
    setup_fans()

    if reboot:
        cprint(f"Rebooting slots: {slots}", style=TermColors.HEADER)
        deactivate_commands = []
        activate_commands = []
        for slot in slots:
            deactivate_commands.append(f'clia deactivate board {slot}')
            activate_commands.append(f'clia activate board {slot}')

        print(f"Deactivating carriers: {slots}")
        run_on_shelf_manager('; '.join(deactivate_commands))

        print("Waiting 5 seconds before re-activating carriers")
        time.sleep(5)

        print(f"Activating carriers: {slots}")
        run_on_shelf_manager('; '.join(activate_commands))

        print("Waiting for carriers to come back online (this takes a bit)")
        for slot in slots:
            # ip = f'10.0.{sys_config["crate_id"]}.{slot + 100}'
            ip = get_slot_ip(slot)
            subprocess.run(['ping_carrier', ip])
    else:
        print("Skipping reboot process")

    # Brings up all smurf-streamer dockers
    cprint('Bringing up smurf dockers', style=TermColors.HEADER)
    if all_slots:
        services = ['smurf-jupyter', 'smurf-util']
        if not use_hostmanager:
            services.append('ocs-pysmurf-monitor')

        start_sync_dockers()
    else:
        services = []

    for slot in slots:
        services.append(f'smurf-streamer-s{slot}')

    start_services(services)
    start_sync_dockers()
    controller_cmd(slots, 'up')

    # Waits for streamer-dockers to start
    print("Waiting for server dockers to connect. This might take a few minutes...")
    for slot in slots:
        epics_server = f'smurf_server_s{slot}'
        check_epics_connection(epics_server, retry=True)

    if reboot and not args.skip_setup:
        cprint("Configuring pysmurf", style=TermColors.HEADER)
        setup_smurfs(slots)
        print("Finished configuring pysmurf!")

    # Enters into an ipython notebook for the first specified slot
    cprint(f"Entering pysmurf slot {slots[0]}", style=TermColors.HEADER)
    enter_pysmurf(slots[0], agg=args.agg)


def setup_smurfs(slots):
    """
    Runs S.setup on one or more slots in parallel

    Args
    -----
    slots : list
        List of slots to run on
    """
    threads = []
    for s in slots:
        print(f"Configuring pysmurf on slot {s}...")
        kw = {
            'args': ('python3',),
            'kwargs': {'args': f'/sodetlib/scripts/setup_pysmurf.py -N {s}'.split()}
        }
        th = threading.Thread(target=util_run, **kw)
        th.start()
        threads.append(th)

    for th in threads:
        th.join()


def start_sync_dockers():
    """
    Begins suprsync docker services (by choosing all docker services that have
    "sync" in their name).
    """
    if use_hostmanager:
        cprint(
            "sys_config['use_hostmanager'] is set to True, leaving sync dockers alone.",
            style=TermColors.WARNING
        )
        return

    services = get_docker_services()
    start_services([s for s in services if 'sync' in s])


def start_sync_func(args):
    """Entrypoint for ``jackhammer start-sync``"""
    start_sync_dockers()


# Entrypoint for jackhammer setup
def setup_func(args):
    if not args.slots:
        slots = sys_config['slot_order']
    else:
        slots = args.slots
    setup_smurfs(slots)


# Entrypoint for jackhammer logs
def log_func(args):
    cmd = shlex.split(f'{docker_compose_cmd} logs -f')
    cmd.extend(args.log_args)
    subprocess.run(cmd, cwd=cwd)


# Entrypoint for jackhammer util
def util_func(args):
    util_run('bash', rm=(not args.detached))


def gui_func(args):
    if args.port is not None:
        server_port = args.port
    else:
        available_slots = sys_config['slot_order']
        if args.slot is not None:
            if args.slot not in available_slots:
                print(f"Slot {args.slot} is not listed in the available "
                      f"smurf_slots in {sys_config_file}")
                raise ValueError
            slot = args.slot
        else:
            slot = available_slots[0]
        server_port = 9000 + 2*slot

    sodetlib_root = os.environ.get('SODETLIB_ROOT', '/home/cryo/sodetlib')
    script_path = os.path.join(sodetlib_root, 'hammers', 'run_gui.sh')
    subprocess.run(f'sh {script_path} {server_port}'.split())


# Entrypoint for jackhammer dump
def dump_func(args):
    if not args.slots:
        slots = sys_config['slot_order']
    else:
        slots = args.slots
        for s in slots:
            if s not in sys_config['slot_order']:
                raise ValueError(
                    f"Slot {s} is not valid for this system! Can only use "
                    f"slots in: {sys_config['slot_order']}")

    dump_docker_logs(slots, dump_rogue_tree=args.dump_rogue)


def deactivate_func(args):
    cmds = [
        f'clia deactivate board {s}' for s in args.slots
    ]
    run_on_shelf_manager('; '.join(cmds))


def activate_func(args):
    cmds = [
        f'clia activate board {s}' for s in args.slots
    ]
    run_on_shelf_manager('; '.join(cmds))


# Entrypoint for jackhammer write-env
def write_env_func(args):
    write_docker_env()


def setup_fans():
    if 'smurf_fans' not in sys_config:
        # Old way of setting up fans
        cprint(
            "'smurf_fans' key not found in sys config. Setting fan levels using "
            "'max_fan_level', however this allows fan speed to change over "
            "time."
        )

        min_fan_level = sys_config.get('min_fan_level')
        init_fan_level = sys_config.get('init_fan_level')

        if (min_fan_level is None) or (init_fan_level is None):
            # Run with old way, using max_fan_level to set both minfanlevel and
            # init
            min_fan_level = sys_config['max_fan_level']
            init_fan_level = sys_config['max_fan_level']
            cprint("Using max_fan_level sys_config value to set minfanlevel and "
                   "initial level. If you've switched to a Comtel crate, please "
                   "change your sys_config to use the `min_fan_level` and "
                   "`init_fan_level` variables", style=TermColors.WARNING)

        cmd = f'clia minfanlevel {min_fan_level}; '
        cmd += f'clia setfanlevel all {init_fan_level}'
        print(f"Setting crate fans to {init_fan_level}...")
        run_on_shelf_manager(cmd)
        return

    fans = sys_config['smurf_fans']
    cmd = ''
    for addr in fans['addresses']:
        a, b = addr

        cmd += f"clia setfanpolicy {a} {b} {fans['policy']}; "
    cmd += f"clia setfanlevel all {fans['speed']};"
    run_on_shelf_manager(cmd)
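# Example (illustrative; addresses, policy, and speed are hypothetical): a
# sys_config block like
#
#   smurf_fans:
#     addresses: [[20, 3], [20, 4]]
#     policy: ENABLE
#     speed: 100
#
# makes setup_fans() run the following on the shelf manager:
#   clia setfanpolicy 20 3 ENABLE; clia setfanpolicy 20 4 ENABLE; clia setfanlevel all 100;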

# Entrypoint for jackhammer setup-fans
def setup_fans_func(args):
    setup_fans()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    ########### Jackhammer pysmurf parser ############
    pysmurf_parser = subparsers.add_parser('pysmurf',
        help="Drops user into ipython session with pysmurf already initialized"
    )
    pysmurf_parser.set_defaults(func=pysmurf_func)
    pysmurf_parser.add_argument('slot', type=int, nargs="?",
        help="Slot number of pysmurf to connect to. If left blank it will "
             "try to figure it out from the sys_config file."
    )
    pysmurf_parser.add_argument('--config-file', '-c',
        help="Pysmurf config file. Defaults to what is specified in sys config file."
    )
    pysmurf_parser.add_argument('--setup', '-s', action='store_true',
        help="If specified, pysmurf will start with setup=True."
    )
    pysmurf_parser.add_argument('--agg', action='store_true',
        help="If specified, matplotlib will use the agg backend. This can be "
             "helpful if you are connecting over ssh without a display."
    )

    ########### Jackhammer hammer parser ############
    hammer_parser = subparsers.add_parser("hammer")
    hammer_parser.set_defaults(func=hammer_func)
    hammer_parser.add_argument('slots', nargs='*', type=int,
        help="Specifies the slots to hammer")
    hammer_parser.add_argument('--no-reboot', '--soft', '-n',
        action='store_true',
        help="If True, will not reboot slots.")
    hammer_parser.add_argument('--no-dump', action='store_true',
        help="If True, will not dump logs.")
    hammer_parser.add_argument('--agg', action='store_true')
    hammer_parser.add_argument('--skip-setup', action='store_true',
        help="Skip pysmurf setup functions. If `--soft` is set, defaults to True.")
    hammer_parser.add_argument('--dump-rogue', action='store_true',
        help="If true, will attempt to connect to pysmurf smurf and dump the rogue tree")

    ########### Jackhammer setup parser ############
    setup_parser = subparsers.add_parser("setup")
    setup_parser.set_defaults(func=setup_func)
    setup_parser.add_argument('slots', nargs='*', type=int,
        help="Specifies the slots to setup")

    ########### Jackhammer logs parser ############
    log_parser = subparsers.add_parser('logs')
    log_parser.add_argument('log_args', nargs="*", type=str,
        help="args passed to docker compose logs")
    log_parser.set_defaults(func=log_func)

    ########### Jackhammer util parser ############
    util_parser = subparsers.add_parser('util')
    util_parser.add_argument('--detached', '-d', action='store_true')
    util_parser.set_defaults(func=util_func)

    ########### Jackhammer gui parser ###########
    gui_parser = subparsers.add_parser('gui')
    gui_parser.add_argument('slot', nargs='?', type=int)
    gui_parser.add_argument('--port', '-p', type=int, help='gui server port')
    gui_parser.set_defaults(func=gui_func)

    ########### Jackhammer dump parser ###########
    dump_parser = subparsers.add_parser('dump', help='Dumps all docker logs')
    dump_parser.add_argument('slots', nargs='*', type=int,
        help='Specifies the slots to dump rogue states')
    dump_parser.add_argument('--dump-rogue', action='store_true',
        help="If true, will attempt to connect to pysmurf smurf and dump the rogue tree")
    dump_parser.set_defaults(func=dump_func)

    ########### Jackhammer deactivate parser ###########
    deactivate_parser = subparsers.add_parser('deactivate', help='Deactivates slots')
    deactivate_parser.add_argument('slots', nargs='+', type=int,
        help='Specifies the slots to deactivate')
    deactivate_parser.set_defaults(func=deactivate_func)

    ########### Jackhammer activate parser ###########
    activate_parser = subparsers.add_parser('activate', help='Activates slots')
    activate_parser.add_argument('slots', nargs='+', type=int,
        help='Specifies the slots to activate')
    activate_parser.set_defaults(func=activate_func)

    ########### Jackhammer write-env parser ###########
    write_env_parser = subparsers.add_parser('write-env', help='writes docker-env to .env file')
    write_env_parser.set_defaults(func=write_env_func)

    ########### Jackhammer start-sync parser ###########
    start_sync_parser = subparsers.add_parser('start-sync')
    start_sync_parser.set_defaults(func=start_sync_func)

    ########### Jackhammer setup-fans parser ###########
    setup_fans_parser = subparsers.add_parser(
        'setup-fans',
        help="Sets fan speeds and policy on crate based on sys-config"
    )
    setup_fans_parser.set_defaults(func=setup_fans_func)

    ########### Jackhammer controller parser ###########
    controller_cmd_parser = subparsers.add_parser(
        'controller',
        help="Brings the pysmurf-controller dockers up or down (or tails their logs) "
             "for the given slots"
    )
    controller_cmd_parser.add_argument('action', choices=['up', 'down', 'logs'])
    controller_cmd_parser.add_argument('slots', type=int, nargs='*', default=None)
    controller_cmd_parser.set_defaults(func=controller_cmd_func)

    args = parser.parse_args()
    if hasattr(args, 'func'):
        args.func(args)
    else:
        parser.print_help()