fastcs-pandablocks 0.2.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,625 @@
+ import asyncio
+ import enum
+ import logging
+ import os
+ from asyncio import CancelledError
+ from collections import deque
+ from collections.abc import AsyncGenerator, Callable
+ from dataclasses import dataclass
+ from importlib.util import find_spec
+ from pathlib import Path
+ from typing import Union
+
+ import numpy as np
+ from fastcs.attributes import AttrR, AttrRW
+ from fastcs.controllers import Controller
+ from fastcs.datatypes import Bool, Enum, Float, Int, String, Table
+ from numpy.typing import DTypeLike
+ from pandablocks.hdf import (
+     EndData,
+     FrameData,
+     Pipeline,
+     StartData,
+     create_default_pipeline,
+     stop_pipeline,
+ )
+ from pandablocks.responses import Data, EndReason, ReadyData
+
+ from fastcs_pandablocks.types import PandaName
+
+ HDFReceived = Union[ReadyData, StartData, FrameData, EndData]
+
+
+ class CaptureMode(enum.Enum):
+     """The mode the circular buffer will use to flush."""
+
+     #: Wait until N frames are received, then write them and finish capture
+     FIRST_N = 0
+
+     #: On EndData, write the last N frames
+     LAST_N = 1
+
+     #: Write data as received until Capture is set to 0
+     FOREVER = 2
+
+
+ class NumCapturedSetter(Pipeline):
+     def __init__(self, number_captured_setter: Callable) -> None:
+         self.number_captured_setter = number_captured_setter
+         super().__init__()
+
+         # TODO: `what_to_do` expects a sync callable but an async one is
+         # passed; `set_record` bridges the two with `asyncio.run`. Fix.
+         self.what_to_do = {int: self.set_record}
+
+     def set_record(self, value: int):
+         asyncio.run(self.number_captured_setter(value))
+
+
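+ # A minimal sketch (editor's addition, not part of the released package) of
+ # the sync->async bridge above: `Pipeline` workers run in their own thread,
+ # which has no running event loop, so `asyncio.run` can drive the async
+ # setter to completion there. `_demo_setter` is hypothetical.
+ def _example_sync_async_bridge() -> None:
+     async def _demo_setter(value: int) -> None:
+         print(f"captured rows: {value}")
+
+     # In a thread with no running loop this blocks until the coroutine
+     # finishes - the same pattern `set_record` relies on.
+     asyncio.run(_demo_setter(42))
+
+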
+ class HDF5Buffer:
+     _buffer_index = None
+     start_data = None
+     number_of_received_rows = 0
+     finish_capturing = False
+     number_of_rows_in_circular_buffer = 0
+
+     def __init__(
+         self,
+         capture_mode: CaptureMode,
+         filepath: Path,
+         number_of_rows_to_capture: int,
+         status_message_setter: Callable,
+         number_received_setter: Callable,
+         number_captured_setter_pipeline: NumCapturedSetter,
+         dataset_name_cache: dict[str, dict[str, str]],
+     ):
+         # Only one filename - the user must stop capture and set a new
+         # FileName/FilePath for new files.
+
+         self.circular_buffer: deque[FrameData] = deque()
+         self.capture_mode = capture_mode
+
+         match capture_mode:
+             case CaptureMode.FIRST_N:
+                 self._handle_FrameData = self._capture_first_n
+             case CaptureMode.LAST_N:
+                 self._handle_FrameData = self._capture_last_n
+             case CaptureMode.FOREVER:
+                 self._handle_FrameData = self._capture_forever
+             case _:
+                 raise RuntimeError("Invalid capture mode")
+
+         self.filepath = filepath
+         self.number_of_rows_to_capture = number_of_rows_to_capture
+         self.status_message_setter = status_message_setter
+         self.number_received_setter = number_received_setter
+         self.number_captured_setter_pipeline = number_captured_setter_pipeline
+
+         self.dataset_name_cache = dataset_name_cache
+
+         if (
+             self.capture_mode == CaptureMode.LAST_N
+             and self.number_of_rows_to_capture <= 0
+         ):
+             raise RuntimeError("Number of rows to capture must be > 0 in LAST_N mode")
+
+         self.start_pipeline()
+
+     def __del__(self):
+         if self.pipeline[0].is_alive():
+             stop_pipeline(self.pipeline)
+
+     def put_data_to_file(self, data: HDFReceived):
+         try:
+             self.pipeline[0].queue.put_nowait(data)
+         except Exception as ex:
+             logging.exception(f"Failed to save the data to HDF5 file: {ex}")
+
+     def start_pipeline(self):
+         self.pipeline = create_default_pipeline(
+             iter([str(self.filepath)]),
+             self.dataset_name_cache,
+             self.number_captured_setter_pipeline,
+         )
+
+     async def _handle_StartData(self, data: StartData):
+         if self.start_data and data != self.start_data:
+             # PandA was disarmed, had its config changed, and was rearmed.
+             # Cannot write to the same file with different start data.
+             logging.error(
+                 "New start data detected, differs from previous start "
+                 "data for this file. Aborting HDF5 data capture."
+             )
+
+             await self.status_message_setter(
+                 "Mismatched StartData packet for file",
+             )
+             self.put_data_to_file(
+                 EndData(self.number_of_received_rows, EndReason.START_DATA_MISMATCH)
+             )
+
+             self.finish_capturing = True
+
+         # Only pass StartData to the pipeline if we haven't already
+         else:
+             # In LAST_N mode, wait until the end of capture to write
+             # the StartData to file.
+             # In FOREVER mode, write the StartData to file only if it's the
+             # first one received.
+             if self.capture_mode == CaptureMode.FIRST_N or (
+                 self.capture_mode == CaptureMode.FOREVER and not self.start_data
+             ):
+                 self.put_data_to_file(data)
+
+             self.start_data = data
+
+     async def _capture_first_n(self, data: FrameData):
+         """
+         Capture FrameData as it comes in. Stop once the number of rows exceeds
+         number_of_rows_to_capture, trimming the data so that exactly
+         number_of_rows_to_capture rows are written.
+         """
+         self.number_of_received_rows += len(data.data)
+
+         if (
+             self.number_of_rows_to_capture > 0
+             and self.number_of_received_rows > self.number_of_rows_to_capture
+         ):
+             # Discard the extra collected data points if necessary
+             data.data = data.data[
+                 : self.number_of_rows_to_capture - self.number_of_received_rows
+             ].copy()
+             self.number_of_received_rows = self.number_of_rows_to_capture
+
+         self.put_data_to_file(data)
+         await self.number_received_setter(self.number_of_received_rows)
+
+         if (
+             self.number_of_rows_to_capture > 0
+             and self.number_of_received_rows == self.number_of_rows_to_capture
+         ):
+             # Reached the configured capture limit, finish the file
+             logging.info(
+                 f"Requested number of frames ({self.number_of_rows_to_capture}) "
+                 "captured, disabling Capture."
+             )
+             await self.status_message_setter("Requested number of frames captured")
+             self.put_data_to_file(EndData(self.number_of_received_rows, EndReason.OK))
+             self.finish_capturing = True
+
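+     # Worked sketch (editor's addition, not part of the released package) of
+     # the FIRST_N trimming arithmetic above, with hypothetical numbers: once
+     # the running row count passes NumCapture, the final frame is truncated
+     # with a negative slice.
+     #
+     #     number_of_rows_to_capture = 10
+     #     number_of_received_rows = 8 + 5   # a 5-row frame arrives -> 13
+     #     # data.data[: 10 - 13] == data.data[:-3] keeps only the first
+     #     # 2 rows of this frame, bringing the total written to exactly 10.
+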
+     async def _capture_forever(self, data: FrameData):
+         self.put_data_to_file(data)
+         self.number_of_received_rows += len(data.data)
+         await self.number_received_setter(self.number_of_received_rows)
+
+     async def _capture_last_n(self, data: FrameData):
+         """
+         Append every FrameData to a buffer until the number of rows equals
+         `:NumCapture`, then overwrite the data circularly.
+
+         Only write the buffered data to file once capture has finished.
+         """
+         self.circular_buffer.append(data)
+         self.number_of_received_rows += len(data.data)
+         self.number_of_rows_in_circular_buffer += len(data.data)
+
+         if self.number_of_rows_in_circular_buffer > self.number_of_rows_to_capture:
+             await self.status_message_setter(
+                 "NumCapture reached, overwriting the oldest received frames"
+             )
+         else:
+             await self.status_message_setter("Filling buffer to NumCapture")
+
+         while self.number_of_rows_in_circular_buffer > self.number_of_rows_to_capture:
+             first_frame_data = self.circular_buffer.popleft()
+             first_frame_data_length = len(first_frame_data.data)
+
+             if first_frame_data_length > self.number_of_rows_to_capture:
+                 # More data than we want to capture, all in a single FrameData.
+                 # We can just slice with NumCapture since this has to be the
+                 # only FrameData in the buffer at this point
+                 assert len(self.circular_buffer) == 0
+                 trimmed_data = first_frame_data.data[
+                     -self.number_of_rows_to_capture :
+                 ].copy()
+                 first_frame_data.data = trimmed_data
+                 self.circular_buffer.appendleft(first_frame_data)
+                 self.number_of_rows_in_circular_buffer = self.number_of_rows_to_capture
+             elif (
+                 first_frame_data_length
+                 > self.number_of_rows_in_circular_buffer
+                 - self.number_of_rows_to_capture
+             ):
+                 # We can slice from the beginning of the FrameData to leave
+                 # the desired number of rows
+                 indices_to_discard = (
+                     self.number_of_rows_in_circular_buffer
+                     - self.number_of_rows_to_capture
+                 )
+                 trimmed_data = first_frame_data.data[indices_to_discard:].copy()
+                 first_frame_data.data = trimmed_data
+                 self.circular_buffer.appendleft(first_frame_data)
+                 self.number_of_rows_in_circular_buffer -= indices_to_discard
+                 assert (
+                     self.number_of_rows_in_circular_buffer
+                     == self.number_of_rows_to_capture
+                 )
+             else:
+                 # Even after removing the entire first FrameData the buffer
+                 # will still be too big, or it will hold exactly the number
+                 # of rows we want
+                 self.number_of_rows_in_circular_buffer -= first_frame_data_length
+
+         await self.number_received_setter(self.number_of_received_rows)
+
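+     # Worked sketch (editor's addition, not part of the released package) of
+     # the LAST_N trimming loop above, with NumCapture = 4 and two buffered
+     # 3-row frames (values hypothetical):
+     #
+     #     buffer = deque([np.array([1, 2, 3]), np.array([4, 5, 6])])  # 6 rows
+     #     excess = 6 - 4                        # 2 rows too many
+     #     oldest = buffer.popleft()             # [1, 2, 3]; len 3 > excess 2,
+     #     buffer.appendleft(oldest[excess:])    # so keep [3]; buffer now holds
+     #                                           # [[3], [4, 5, 6]] - last 4 rows
+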
+     async def _handle_EndData(self, data: EndData):
+         match self.capture_mode:
+             case CaptureMode.LAST_N:
+                 # In LAST_N only write the buffered FrameData if the EndReason
+                 # is OK or MANUALLY_STOPPED
+                 if data.reason not in (EndReason.OK, EndReason.MANUALLY_STOPPED):
+                     await self.status_message_setter(
+                         f"Stopped capturing with reason {data.reason}, "
+                         "skipping writing of buffered frames"
+                     )
+                     self.finish_capturing = True
+                     return
+
+                 await self.status_message_setter(
+                     "Finishing capture, writing buffered frames to file"
+                 )
+                 assert self.start_data is not None
+                 self.put_data_to_file(self.start_data)
+                 for frame_data in self.circular_buffer:
+                     self.put_data_to_file(frame_data)
+
+             case CaptureMode.FOREVER:
+                 if data.reason != EndReason.MANUALLY_STOPPED:
+                     await self.status_message_setter(
+                         "Finished capture, waiting for next ReadyData"
+                     )
+                     return
+
+             case CaptureMode.FIRST_N:
+                 pass  # Frames will already have been written in FIRST_N mode
+
+             case _:
+                 raise RuntimeError("Unknown capture mode")
+
+         await self.status_message_setter("Finished capture")
+         self.finish_capturing = True
+         self.put_data_to_file(data)
+
+     async def handle_data(self, data: HDFReceived):
+         match data:
+             case ReadyData():
+                 pass
+             case StartData():
+                 await self.status_message_setter("Starting capture")
+                 await self._handle_StartData(data)
+             case FrameData():
+                 await self._handle_FrameData(data)
+             case EndData():
+                 await self._handle_EndData(data)
+             case _:
+                 raise RuntimeError(
+                     f"Data received was of type {type(data)}, not "
+                     "StartData, EndData, ReadyData, or FrameData"
+                 )
+
+
+ @dataclass
+ class DatasetAttributes:
+     """A dataset name and capture mode"""
+
+     name: AttrRW[str]
+     capture: AttrRW[enum.Enum]
+
+
+ class DatasetTableWrapper:
+     """Used for outputting formatted dataset names in the HDF5 writer, and for
+     creating and updating the HDF5 `DATASETS` table attribute."""
+
+     NUMPY_TYPE: list[tuple[str, DTypeLike]] = [
+         ("name", np.dtype("S1000")),
+         ("dtype", np.dtype("S1000")),
+     ]
+
+     def __init__(
+         self,
+         dataset_cache: dict[PandaName, DatasetAttributes],
+     ):
+         self._dataset_cache = dataset_cache
+
+     def hdf_writer_names(self) -> dict[str, dict[str, str]]:
+         """Formats the current dataset names for use in the HDFWriter"""
+
+         hdf_names: dict[str, dict[str, str]] = {}
+         for panda_name, dataset in self._dataset_cache.items():
+             capture_str_value = dataset.capture.get().value
+             name_str_value = dataset.name.get()
+             if not name_str_value or capture_str_value == "No":
+                 continue
+
+             hdf_names[str(panda_name)] = hdf_name = {}
+
+             hdf_name[capture_str_value.split(" ")[-1]] = name_str_value
+             # Suffix with -min and -max if both are present
+             if "Min Max" in capture_str_value:
+                 hdf_name["Min"] = f"{name_str_value}-min"
+                 hdf_name["Max"] = f"{name_str_value}-max"
+
+         return hdf_names
+
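+     # Worked sketch (editor's addition, not part of the released package),
+     # mirroring the mapping above for one hypothetical entry: PandA field
+     # "COUNTER1.OUT", dataset name "counts", capture mode "Min Max Mean".
+     @staticmethod
+     def _example_hdf_writer_names() -> dict[str, dict[str, str]]:
+         capture_str_value, name_str_value = "Min Max Mean", "counts"
+         # The last word of the capture string keys the primary dataset
+         hdf_name = {capture_str_value.split(" ")[-1]: name_str_value}
+         if "Min Max" in capture_str_value:
+             hdf_name["Min"] = f"{name_str_value}-min"
+             hdf_name["Max"] = f"{name_str_value}-max"
+         # -> {"COUNTER1.OUT": {"Mean": "counts", "Min": "counts-min",
+         #                      "Max": "counts-max"}}
+         return {"COUNTER1.OUT": hdf_name}
+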
+     def get_numpy_table(self) -> np.ndarray:
+         return np.array(
+             [
+                 (dataset.name.get(), "float64")
+                 for dataset in self._dataset_cache.values()
+                 if dataset.name.get() and dataset.capture.get().value != "No"
+             ],
+             dtype=self.NUMPY_TYPE,
+         )
+
+     def set_on_update_callback(self, table_attribute: AttrR):
+         async def callback(value):
+             await table_attribute.update(self.get_numpy_table())
+
+         for dataset_attributes in self._dataset_cache.values():
+             dataset_attributes.name.set_update_callback(callback)
+             dataset_attributes.capture.set_update_callback(callback)
+
+
+ class DataController(Controller):
+     """Class to create and control the records that handle HDF5 processing"""
+
+     hdf_directory = AttrRW(String(), description="File path for HDF5 files.")
+
+     create_directory = AttrRW(
+         Int(),
+         description="Directory creation depth",
+         initial_value=0,
+     )
+
+     directory_exists = AttrR(
+         Bool(), description="Directory exists", initial_value=False
+     )
+
+     hdf_file_name = AttrRW(
+         String(),
+         description="File name prefix for HDF5 files",
+         initial_value="",
+     )
+
+     hdf_full_file_path = AttrR(
+         String(),
+         description="Full HDF5 file name with directory",
+         initial_value="",
+     )
+
+     num_capture = AttrRW(
+         Int(min=0),
+         description="Number of frames to capture. 0=infinite",
+         initial_value=0,  # Infinite capture
+     )
+
+     num_captured = AttrR(
+         Int(),
+         description="Number of frames written to file.",
+         initial_value=0,
+     )
+
+     num_received = AttrR(
+         Int(),
+         description="Number of frames received from the PandA.",
+         initial_value=0,
+     )
+
+     flush_period = AttrRW(
+         Float(units="s"),
+         description="Period with which data is flushed (seconds).",
+         initial_value=1.0,
+     )
+
+     capture = AttrRW(
+         Bool(), description="Start/stop HDF5 capture.", initial_value=False
+     )
+
+     capture_mode = AttrRW(
+         Enum(CaptureMode),
+         description="Choose how the HDF writer flushes",
+         initial_value=CaptureMode.FIRST_N,
+     )
+
+     status = AttrR(
+         String(),
+         description="Status of HDF5 capture",
+         initial_value="OK",
+     )
+
+     def __init__(
+         self,
+         client_data: Callable[[bool, float], AsyncGenerator[Data, None]],
+         dataset_attributes: dict[PandaName, DatasetAttributes],
+     ):
+         super().__init__()
+
+         if find_spec("h5py") is None:
+             logging.warning("No HDF5 support detected - skipping creating HDF5 records")
+             return
+
+         self._client_data = client_data
+         self._dataset_table_wrapper = DatasetTableWrapper(dataset_attributes)
+         self._handle_hdf5_data_task = None
+
+         datasets_attribute = AttrR(
+             Table(self._dataset_table_wrapper.NUMPY_TYPE),
+             description="HDF5 dataset names.",
+             initial_value=self._dataset_table_wrapper.get_numpy_table(),
+         )
+         self.attributes["datasets"] = datasets_attribute
+         self._dataset_table_wrapper.set_on_update_callback(datasets_attribute)
+
+         self.hdf_directory.add_on_update_callback(self._update_directory_path)
+         self.hdf_file_name.add_on_update_callback(self._update_full_file_path)
+         self.capture.add_on_update_callback(self._capture_on_update)
+
+     async def _update_directory_path(self, new_val) -> None:
+         """Handles writes to the directory path PV, creating
+         directories based on the setting of the CreateDirectory record"""
+         new_path = Path(new_val).absolute()
+         create_dir_depth = self.create_directory.get()
+         max_dirs_to_create = 0
+         if create_dir_depth < 0:
+             max_dirs_to_create = abs(create_dir_depth)
+         elif create_dir_depth > len(new_path.parents):
+             max_dirs_to_create = 0
+         elif create_dir_depth > 0:
+             max_dirs_to_create = len(new_path.parents) - create_dir_depth
+
+         logging.debug(f"Permitted to create up to {max_dirs_to_create} dirs.")
+         dirs_to_create = 0
+         for p in reversed(new_path.parents):
+             if not p.exists():
+                 if dirs_to_create == 0:
+                     # First directory level that does not exist, log it.
+                     logging.error(f"All directories from {str(p)} down do not exist!")
+                 dirs_to_create += 1
+             else:
+                 logging.info(f"{str(p)} exists")
+
+         # Account for the target path itself not existing
+         if not os.path.exists(new_path):
+             dirs_to_create += 1
+
+         logging.debug(f"Need to create {dirs_to_create} directories.")
+
+         # Case where all dirs exist
+         if dirs_to_create == 0:
+             if os.access(new_path, os.W_OK):
+                 status_msg = "Dir exists and is writable"
+                 await self.directory_exists.update(True)
+             else:
+                 status_msg = "Dirs exist but aren't writable."
+                 await self.directory_exists.update(False)
+         # Case where we will create directories
+         elif dirs_to_create <= max_dirs_to_create:
+             logging.debug(f"Attempting to create {dirs_to_create} dir(s)...")
+             try:
+                 os.makedirs(new_path, exist_ok=True)
+                 status_msg = f"Created {dirs_to_create} dirs."
+                 await self.directory_exists.update(True)
+             except PermissionError:
+                 status_msg = "Permission error creating dirs!"
+                 await self.directory_exists.update(False)
+         # Case where too many directories need to be created
+         else:
+             status_msg = f"Need to create {dirs_to_create} > {max_dirs_to_create} dirs."
+             await self.directory_exists.update(False)
+
+         if not self.directory_exists.get():
+             logging.error(status_msg)
+         else:
+             logging.debug(status_msg)
+
+         await self.status.update(status_msg)
+
+         await self._update_full_file_path(new_val)
+
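+     # Worked sketch (editor's addition, not part of the released package) of
+     # the CreateDirectory depth rules above, for the hypothetical path
+     # /data/beamline/2024/run1 (4 parent levels).
+     @staticmethod
+     def _example_create_directory_depth() -> None:
+         new_path = Path("/data/beamline/2024/run1")
+         assert len(new_path.parents) == 4  # /data/beamline/2024 ... /
+         for depth, expected in [(-2, 2), (2, 2), (5, 0), (0, 0)]:
+             if depth < 0:
+                 max_dirs = abs(depth)  # negative: create up to |depth| dirs
+             elif depth > len(new_path.parents):
+                 max_dirs = 0  # deeper than the path itself: create nothing
+             elif depth > 0:
+                 # positive: protect the first `depth` levels below the root
+                 max_dirs = len(new_path.parents) - depth
+             else:
+                 max_dirs = 0  # depth == 0: never create directories
+             assert max_dirs == expected
+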
+     async def _update_full_file_path(self, value) -> None:
+         await self.hdf_full_file_path.update(self._get_filepath())
+
+     async def _handle_hdf5_data(self) -> None:
+         """Handles writing HDF5 data from the PandA to file, based on the
+         configuration in the various HDF5 records.
+         This method expects to be run as an asyncio Task."""
+         buffer: HDF5Buffer | None = None
+         try:
+             # Set up the HDF buffer
+
+             # TODO: Check the directory exists and is writable
+             if self.hdf_directory.get() == "":
+                 raise RuntimeError(
+                     "Configured HDF directory does not exist or is not writable!"
+                 )
+
+             num_capture: int = self.num_capture.get()
+             capture_mode: CaptureMode = CaptureMode(self.capture_mode.get())
+             filepath = self._get_filepath()
+
+             await self.num_captured.update(0)
+             number_captured_setter_pipeline = NumCapturedSetter(
+                 self.num_captured.update
+             )
+
+             await self.attributes["datasets"].update(  # type: ignore
+                 self._dataset_table_wrapper.get_numpy_table()
+             )
+
+             buffer = HDF5Buffer(
+                 capture_mode,
+                 Path(filepath),
+                 num_capture,
+                 self.status.update,
+                 self.num_received.update,
+                 number_captured_setter_pipeline,
+                 self._dataset_table_wrapper.hdf_writer_names(),
+             )
+             flush_period: float = self.flush_period.get()
+             async for data in self._client_data(False, flush_period):
+                 logging.debug(f"Received data packet: {data}")
+
+                 await buffer.handle_data(data)  # type: ignore
+                 if buffer.finish_capturing:
+                     break
+
+         except CancelledError:
+             logging.info("Capturing task cancelled, closing HDF5 file")
+             await self.status.update("Capturing disabled")
+             # Only send EndData if we know the file was opened - the task
+             # could be cancelled before the PandA has actually sent any data
+             if buffer and buffer.capture_mode != CaptureMode.LAST_N:
+                 buffer.put_data_to_file(
+                     EndData(buffer.number_of_received_rows, EndReason.MANUALLY_STOPPED)
+                 )
+
+         except Exception:
+             logging.exception("HDF5 data capture terminated due to unexpected error")
+             await self.status.update(
+                 "Capture disabled, unexpected exception.",
+             )
+             # Only send EndData if we know the file was opened - the exception
+             # could happen before the file was opened
+             if (
+                 buffer
+                 and buffer.start_data
+                 and buffer.capture_mode != CaptureMode.LAST_N
+             ):
+                 buffer.put_data_to_file(
+                     EndData(buffer.number_of_received_rows, EndReason.UNKNOWN_EXCEPTION)
+                 )
+
+         finally:
+             logging.debug("Finishing processing HDF5 PandA data")
+             await self.num_received.update(
+                 buffer.number_of_received_rows if buffer else 0
+             )
+             await self.capture.update(False)
+
+     def _get_filepath(self) -> str:
+         """Create the file path for the HDF5 file from the relevant records"""
+         return "/".join([self.hdf_directory.get(), self.hdf_file_name.get()])
+
+     async def _capture_on_update(self, value) -> None:
+         """Process an update to the Capture record, to start/stop recording HDF5 data"""
+         logging.debug(f"Entering HDF5:Capture record on_update method, value {value}.")
+         if value:
+             if self._handle_hdf5_data_task:
+                 logging.warning("Existing HDF5 capture running, cancelling it.")
+                 self._handle_hdf5_data_task.cancel()
+
+             self._handle_hdf5_data_task = asyncio.create_task(self._handle_hdf5_data())
+         else:
+             if self._handle_hdf5_data_task is not None:
+                 self._handle_hdf5_data_task.cancel()  # Abort any HDF5 file writing
+                 self._handle_hdf5_data_task = None
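+
+
+ # Usage sketch (editor's addition, not part of the released package). The
+ # host name is hypothetical; pandablocks.asyncio.AsyncioClient.data is an
+ # async generator matching the `client_data` signature used above.
+ async def _example_wire_data_controller() -> DataController:
+     from pandablocks.asyncio import AsyncioClient
+
+     client = AsyncioClient("panda-host.example.com")
+     await client.connect()
+     # An empty dataset cache keeps the sketch minimal; a real deployment
+     # passes one DatasetAttributes per PandA capture field.
+     controller = DataController(client.data, dataset_attributes={})
+     # Setting the Capture attribute to True later schedules
+     # DataController._handle_hdf5_data() as an asyncio task.
+     return controller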
@@ -0,0 +1,69 @@
+ import logging
+ import re
+ from typing import TypedDict
+
+ from fastcs.attributes import AttrR
+ from fastcs.controllers import Controller
+ from fastcs.datatypes import String
+
+ from fastcs_pandablocks.types import WidgetGroup
+
+
+ class PandaVersions(TypedDict):
+     panda_sw: str
+     fpga: str
+     rootfs: str
+
+
+ def _parse_idn_response(idn_response: str) -> PandaVersions:
+     """Parses version info from the PandA's response to the *IDN? command.
+
+     See: https://pandablocks-server.readthedocs.io/en/latest/commands.html#system-commands
+     """
+
+     # Currently, IDN reports sw, fpga, and rootfs versions
+     firmware_versions = {"PandA SW": "Unknown", "FPGA": "Unknown", "rootfs": "Unknown"}
+
+     # If the *IDN response contains too many keys, bail out and leave the
+     # versions as "Unknown". Since spaces are used to delimit versions and can
+     # also appear in the keys and values, the regex matching below will not
+     # work if an additional key is present that we don't explicitly handle.
+     if sum(name in idn_response for name in firmware_versions) < idn_response.count(
+         ":"
+     ):
+         logging.error(
+             f"Received unexpected version numbers in version string {idn_response}!"
+         )
+     else:
+         for firmware_name in firmware_versions:
+             pattern = re.compile(
+                 rf"{re.escape(firmware_name)}:\s*([^:]+?)"
+                 rf"(?=\s*\b(?:{'|'.join(map(re.escape, firmware_versions))}):|$)"
+             )
+             if match := pattern.search(idn_response):
+                 firmware_versions[firmware_name] = match.group(1).strip()
+                 logging.info(
+                     f"{firmware_name} Version: {firmware_versions[firmware_name]}"
+                 )
+             else:
+                 logging.warning(f"Failed to get {firmware_name} version information!")
+
+     return PandaVersions(
+         panda_sw=firmware_versions["PandA SW"],
+         fpga=firmware_versions["FPGA"],
+         rootfs=firmware_versions["rootfs"],
+     )
+
+
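+ # Usage sketch (editor's addition, not part of the released package). The
+ # response string is hypothetical but follows the documented "key: value"
+ # layout the parser above expects.
+ def _example_parse_idn() -> None:
+     idn = "PandA SW: 3.0 FPGA: 0.1.1 86e5f0a2 rootfs: PandA-rootfs 3.0a1"
+     versions = _parse_idn_response(idn)
+     assert versions == PandaVersions(
+         panda_sw="3.0", fpga="0.1.1 86e5f0a2", rootfs="PandA-rootfs 3.0a1"
+     )
+
+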
+ class VersionController(Controller):
+     def __init__(self, idn_response: str):
+         super().__init__()
+         self.description = "Version information from the PandA."
+         versions = _parse_idn_response(idn_response)
+         for version_name, version in versions.items():
+             self.attributes[version_name] = AttrR(
+                 String(),
+                 description="Version information from the PandA.",
+                 group=WidgetGroup.READBACKS.value,
+                 initial_value=version,  # type: ignore
+             )