cgse 2023.38.0-py3-none-any.whl → 2024.1.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- README.md +27 -0
- bump.py +85 -0
- cgse-2024.1.4.dist-info/METADATA +38 -0
- cgse-2024.1.4.dist-info/RECORD +5 -0
- {cgse-2023.38.0.dist-info → cgse-2024.1.4.dist-info}/WHEEL +1 -2
- cgse-2023.38.0.dist-info/COPYING +0 -674
- cgse-2023.38.0.dist-info/COPYING.LESSER +0 -165
- cgse-2023.38.0.dist-info/METADATA +0 -144
- cgse-2023.38.0.dist-info/RECORD +0 -649
- cgse-2023.38.0.dist-info/entry_points.txt +0 -75
- cgse-2023.38.0.dist-info/top_level.txt +0 -2
- egse/__init__.py +0 -12
- egse/__main__.py +0 -32
- egse/aeu/aeu.py +0 -5235
- egse/aeu/aeu_awg.yaml +0 -265
- egse/aeu/aeu_crio.yaml +0 -273
- egse/aeu/aeu_cs.py +0 -626
- egse/aeu/aeu_devif.py +0 -321
- egse/aeu/aeu_main_ui.py +0 -912
- egse/aeu/aeu_metrics.py +0 -131
- egse/aeu/aeu_protocol.py +0 -463
- egse/aeu/aeu_psu.yaml +0 -204
- egse/aeu/aeu_ui.py +0 -873
- egse/aeu/arbdata/FccdRead.arb +0 -2
- egse/aeu/arbdata/FccdRead_min_points.arb +0 -2
- egse/aeu/arbdata/HeaterSync_FccdRead.arb +0 -2
- egse/aeu/arbdata/HeaterSync_ccdRead25.arb +0 -2
- egse/aeu/arbdata/HeaterSync_ccdRead31_25.arb +0 -2
- egse/aeu/arbdata/HeaterSync_ccdRead37_50.arb +0 -2
- egse/aeu/arbdata/HeaterSync_ccdRead43_75.arb +0 -2
- egse/aeu/arbdata/HeaterSync_ccdRead50.arb +0 -2
- egse/aeu/arbdata/Heater_FccdRead_min_points.arb +0 -2
- egse/aeu/arbdata/ccdRead25.arb +0 -2
- egse/aeu/arbdata/ccdRead25_150ms.arb +0 -2
- egse/aeu/arbdata/ccdRead31_25.arb +0 -2
- egse/aeu/arbdata/ccdRead31_25_150ms.arb +0 -2
- egse/aeu/arbdata/ccdRead37_50.arb +0 -2
- egse/aeu/arbdata/ccdRead37_50_150ms.arb +0 -2
- egse/aeu/arbdata/ccdRead43_75.arb +0 -2
- egse/aeu/arbdata/ccdRead43_75_150ms.arb +0 -2
- egse/aeu/arbdata/ccdRead50.arb +0 -2
- egse/aeu/arbdata/ccdRead50_150ms.arb +0 -2
- egse/alert/__init__.py +0 -1049
- egse/alert/alertman.yaml +0 -37
- egse/alert/alertman_cs.py +0 -234
- egse/alert/alertman_ui.py +0 -603
- egse/alert/gsm/beaglebone.py +0 -138
- egse/alert/gsm/beaglebone.yaml +0 -51
- egse/alert/gsm/beaglebone_cs.py +0 -108
- egse/alert/gsm/beaglebone_devif.py +0 -130
- egse/alert/gsm/beaglebone_protocol.py +0 -48
- egse/bits.py +0 -318
- egse/camera.py +0 -44
- egse/collimator/__init__.py +0 -0
- egse/collimator/fcul/__init__.py +0 -0
- egse/collimator/fcul/ogse.py +0 -1077
- egse/collimator/fcul/ogse.yaml +0 -14
- egse/collimator/fcul/ogse_cs.py +0 -154
- egse/collimator/fcul/ogse_devif.py +0 -358
- egse/collimator/fcul/ogse_protocol.py +0 -129
- egse/collimator/fcul/ogse_sim.py +0 -431
- egse/collimator/fcul/ogse_ui.py +0 -1108
- egse/command.py +0 -699
- egse/config.py +0 -410
- egse/confman/__init__.py +0 -1015
- egse/confman/confman.yaml +0 -67
- egse/confman/confman_cs.py +0 -239
- egse/confman/confman_ui.py +0 -381
- egse/confman/setup_ui.py +0 -565
- egse/control.py +0 -442
- egse/coordinates/__init__.py +0 -531
- egse/coordinates/avoidance.py +0 -103
- egse/coordinates/cslmodel.py +0 -127
- egse/coordinates/laser_tracker_to_dict.py +0 -120
- egse/coordinates/point.py +0 -707
- egse/coordinates/pyplot.py +0 -195
- egse/coordinates/referenceFrame.py +0 -1279
- egse/coordinates/refmodel.py +0 -737
- egse/coordinates/rotationMatrix.py +0 -85
- egse/coordinates/transform3d_addon.py +0 -419
- egse/csl/__init__.py +0 -50
- egse/csl/commanding.py +0 -78
- egse/csl/icons/hexapod-connected-selected.svg +0 -30
- egse/csl/icons/hexapod-connected.svg +0 -30
- egse/csl/icons/hexapod-homing-selected.svg +0 -68
- egse/csl/icons/hexapod-homing.svg +0 -68
- egse/csl/icons/hexapod-retract-selected.svg +0 -56
- egse/csl/icons/hexapod-retract.svg +0 -51
- egse/csl/icons/hexapod-zero-selected.svg +0 -56
- egse/csl/icons/hexapod-zero.svg +0 -56
- egse/csl/icons/logo-puna.svg +0 -92
- egse/csl/icons/stop.svg +0 -1
- egse/csl/initialisation.py +0 -102
- egse/csl/mech_pos_settings.yaml +0 -18
- egse/das.py +0 -1247
- egse/das.yaml +0 -7
- egse/data/conf/SETUP_CSL_00000_170620_150000.yaml +0 -5
- egse/data/conf/SETUP_CSL_00001_170620_151010.yaml +0 -69
- egse/data/conf/SETUP_CSL_00002_170620_151020.yaml +0 -69
- egse/data/conf/SETUP_CSL_00003_170620_151030.yaml +0 -69
- egse/data/conf/SETUP_CSL_00004_170620_151040.yaml +0 -69
- egse/data/conf/SETUP_CSL_00005_170620_151050.yaml +0 -69
- egse/data/conf/SETUP_CSL_00006_170620_151060.yaml +0 -69
- egse/data/conf/SETUP_CSL_00007_170620_151070.yaml +0 -69
- egse/data/conf/SETUP_CSL_00008_170620_151080.yaml +0 -75
- egse/data/conf/SETUP_CSL_00010_210308_083016.yaml +0 -138
- egse/data/conf/SETUP_INTA_00000_170620_150000.yaml +0 -4
- egse/data/conf/SETUP_SRON_00000_170620_150000.yaml +0 -4
- egse/decorators.py +0 -415
- egse/device.py +0 -269
- egse/dpu/__init__.py +0 -2681
- egse/dpu/ccd_ui.py +0 -508
- egse/dpu/dpu.py +0 -786
- egse/dpu/dpu.yaml +0 -153
- egse/dpu/dpu_cs.py +0 -272
- egse/dpu/dpu_ui.py +0 -668
- egse/dpu/fitsgen.py +0 -2077
- egse/dpu/fitsgen_test.py +0 -752
- egse/dpu/fitsgen_ui.py +0 -399
- egse/dpu/hdf5_model.py +0 -332
- egse/dpu/hdf5_ui.py +0 -277
- egse/dpu/hdf5_viewer.py +0 -506
- egse/dpu/hk_ui.py +0 -468
- egse/dpu_commands.py +0 -81
- egse/dsi/constants.py +0 -220
- egse/dsi/esl.py +0 -870
- egse/dsi/rmap.py +0 -1042
- egse/dsi/rmapci.py +0 -37
- egse/dsi/spw.py +0 -154
- egse/dsi/spw_state.py +0 -29
- egse/dummy.py +0 -258
- egse/dyndummy.py +0 -179
- egse/env.py +0 -278
- egse/exceptions.py +0 -88
- egse/fdir/__init__.py +0 -28
- egse/fdir/fdir_manager.py +0 -85
- egse/fdir/fdir_manager.yaml +0 -51
- egse/fdir/fdir_manager_controller.py +0 -228
- egse/fdir/fdir_manager_cs.py +0 -164
- egse/fdir/fdir_manager_interface.py +0 -25
- egse/fdir/fdir_remote.py +0 -73
- egse/fdir/fdir_remote.yaml +0 -37
- egse/fdir/fdir_remote_controller.py +0 -50
- egse/fdir/fdir_remote_cs.py +0 -97
- egse/fdir/fdir_remote_interface.py +0 -14
- egse/fdir/fdir_remote_popup.py +0 -31
- egse/fee/__init__.py +0 -114
- egse/fee/f_fee_register.yaml +0 -43
- egse/fee/fee.py +0 -631
- egse/fee/feesim.py +0 -750
- egse/fee/n_fee_hk.py +0 -761
- egse/fee/nfee.py +0 -187
- egse/filterwheel/__init__.py +0 -4
- egse/filterwheel/eksma/__init__.py +0 -24
- egse/filterwheel/eksma/fw8smc4.py +0 -661
- egse/filterwheel/eksma/fw8smc4.yaml +0 -121
- egse/filterwheel/eksma/fw8smc4_cs.py +0 -144
- egse/filterwheel/eksma/fw8smc4_devif.py +0 -473
- egse/filterwheel/eksma/fw8smc4_protocol.py +0 -81
- egse/filterwheel/eksma/fw8smc4_ui.py +0 -940
- egse/filterwheel/eksma/fw8smc5.py +0 -111
- egse/filterwheel/eksma/fw8smc5.yaml +0 -105
- egse/filterwheel/eksma/fw8smc5_controller.py +0 -307
- egse/filterwheel/eksma/fw8smc5_cs.py +0 -141
- egse/filterwheel/eksma/fw8smc5_interface.py +0 -65
- egse/filterwheel/eksma/fw8smc5_simulator.py +0 -29
- egse/filterwheel/eksma/fw8smc5_ui.py +0 -1068
- egse/filterwheel/eksma/testpythonfw.py +0 -215
- egse/fov/__init__.py +0 -65
- egse/fov/fov_hk.py +0 -712
- egse/fov/fov_ui.py +0 -861
- egse/fov/fov_ui_controller.py +0 -140
- egse/fov/fov_ui_model.py +0 -200
- egse/fov/fov_ui_view.py +0 -345
- egse/gimbal/__init__.py +0 -32
- egse/gimbal/symetrie/__init__.py +0 -26
- egse/gimbal/symetrie/alpha.py +0 -586
- egse/gimbal/symetrie/generic_gimbal_ui.py +0 -1521
- egse/gimbal/symetrie/gimbal.py +0 -877
- egse/gimbal/symetrie/gimbal.yaml +0 -168
- egse/gimbal/symetrie/gimbal_cs.py +0 -183
- egse/gimbal/symetrie/gimbal_protocol.py +0 -135
- egse/gimbal/symetrie/gimbal_ui.py +0 -361
- egse/gimbal/symetrie/pmac.py +0 -1006
- egse/gimbal/symetrie/pmac_regex.py +0 -83
- egse/graph.py +0 -132
- egse/gui/__init__.py +0 -47
- egse/gui/buttons.py +0 -378
- egse/gui/focalplane.py +0 -1281
- egse/gui/formatter.py +0 -10
- egse/gui/led.py +0 -162
- egse/gui/limitswitch.py +0 -143
- egse/gui/mechanisms.py +0 -588
- egse/gui/states.py +0 -148
- egse/gui/stripchart.py +0 -729
- egse/gui/switch.py +0 -112
- egse/h5.py +0 -274
- egse/help/__init__.py +0 -0
- egse/help/help_ui.py +0 -126
- egse/hexapod/__init__.py +0 -32
- egse/hexapod/symetrie/__init__.py +0 -138
- egse/hexapod/symetrie/alpha.py +0 -874
- egse/hexapod/symetrie/dynalpha.py +0 -1387
- egse/hexapod/symetrie/hexapod_ui.py +0 -1516
- egse/hexapod/symetrie/pmac.py +0 -1010
- egse/hexapod/symetrie/pmac_regex.py +0 -83
- egse/hexapod/symetrie/puna.py +0 -1167
- egse/hexapod/symetrie/puna.yaml +0 -193
- egse/hexapod/symetrie/puna_cs.py +0 -196
- egse/hexapod/symetrie/puna_protocol.py +0 -131
- egse/hexapod/symetrie/puna_ui.py +0 -434
- egse/hexapod/symetrie/punaplus.py +0 -107
- egse/hexapod/symetrie/zonda.py +0 -872
- egse/hexapod/symetrie/zonda.yaml +0 -337
- egse/hexapod/symetrie/zonda_cs.py +0 -172
- egse/hexapod/symetrie/zonda_devif.py +0 -415
- egse/hexapod/symetrie/zonda_protocol.py +0 -119
- egse/hexapod/symetrie/zonda_ui.py +0 -449
- egse/hk.py +0 -765
- egse/icons/aeu-cs-start.svg +0 -117
- egse/icons/aeu-cs-stop.svg +0 -118
- egse/icons/aeu-cs.svg +0 -107
- egse/icons/aeu_cs-started.svg +0 -112
- egse/icons/aeu_cs-stopped.svg +0 -112
- egse/icons/aeu_cs.svg +0 -55
- egse/icons/alert.svg +0 -1
- egse/icons/arrow-double-left.png +0 -0
- egse/icons/arrow-double-right.png +0 -0
- egse/icons/arrow-up.svg +0 -11
- egse/icons/backward.svg +0 -1
- egse/icons/busy.svg +0 -1
- egse/icons/cleaning.svg +0 -115
- egse/icons/color-scheme.svg +0 -1
- egse/icons/cs-connected-alert.svg +0 -91
- egse/icons/cs-connected-disabled.svg +0 -43
- egse/icons/cs-connected.svg +0 -89
- egse/icons/cs-not-connected.svg +0 -44
- egse/icons/double-left-arrow.svg +0 -1
- egse/icons/double-right-arrow.svg +0 -1
- egse/icons/erase-disabled.svg +0 -19
- egse/icons/erase.svg +0 -59
- egse/icons/fitsgen-start.svg +0 -47
- egse/icons/fitsgen-stop.svg +0 -48
- egse/icons/fitsgen.svg +0 -1
- egse/icons/forward.svg +0 -1
- egse/icons/fov-hk-start.svg +0 -33
- egse/icons/fov-hk-stop.svg +0 -37
- egse/icons/fov-hk.svg +0 -1
- egse/icons/front-desk.svg +0 -1
- egse/icons/home-actioned.svg +0 -15
- egse/icons/home-disabled.svg +0 -15
- egse/icons/home.svg +0 -13
- egse/icons/info.svg +0 -1
- egse/icons/invalid.png +0 -0
- egse/icons/led-green.svg +0 -20
- egse/icons/led-grey.svg +0 -20
- egse/icons/led-orange.svg +0 -20
- egse/icons/led-red.svg +0 -20
- egse/icons/led-square-green.svg +0 -134
- egse/icons/led-square-grey.svg +0 -134
- egse/icons/led-square-orange.svg +0 -134
- egse/icons/led-square-red.svg +0 -134
- egse/icons/limit-switch-all-green.svg +0 -115
- egse/icons/limit-switch-all-red.svg +0 -117
- egse/icons/limit-switch-el+.svg +0 -116
- egse/icons/limit-switch-el-.svg +0 -117
- egse/icons/location-marker.svg +0 -1
- egse/icons/logo-dpu.svg +0 -48
- egse/icons/logo-gimbal.svg +0 -112
- egse/icons/logo-huber.svg +0 -23
- egse/icons/logo-ogse.svg +0 -31
- egse/icons/logo-puna.svg +0 -92
- egse/icons/logo-tcs.svg +0 -29
- egse/icons/logo-zonda.svg +0 -66
- egse/icons/maximize.svg +0 -1
- egse/icons/meter.svg +0 -1
- egse/icons/more.svg +0 -45
- egse/icons/n-fee-hk-start.svg +0 -24
- egse/icons/n-fee-hk-stop.svg +0 -25
- egse/icons/n-fee-hk.svg +0 -83
- egse/icons/observing-off.svg +0 -46
- egse/icons/observing-on.svg +0 -46
- egse/icons/open-document-hdf5.png +0 -0
- egse/icons/open-document-hdf5.svg +0 -21
- egse/icons/ops-mode.svg +0 -1
- egse/icons/play-green.svg +0 -17
- egse/icons/plugged-disabled.svg +0 -27
- egse/icons/plugged.svg +0 -21
- egse/icons/pm_ui.svg +0 -1
- egse/icons/power-button-green.svg +0 -27
- egse/icons/power-button-red.svg +0 -27
- egse/icons/power-button.svg +0 -27
- egse/icons/radar.svg +0 -1
- egse/icons/radioactive.svg +0 -2
- egse/icons/reload.svg +0 -1
- egse/icons/remote-control-off.svg +0 -28
- egse/icons/remote-control-on.svg +0 -28
- egse/icons/repeat-blue.svg +0 -15
- egse/icons/repeat.svg +0 -1
- egse/icons/settings.svg +0 -1
- egse/icons/shrink.svg +0 -1
- egse/icons/shutter.svg +0 -1
- egse/icons/sign-off.svg +0 -1
- egse/icons/sign-on.svg +0 -1
- egse/icons/sim-mode.svg +0 -1
- egse/icons/small-buttons-go.svg +0 -20
- egse/icons/small-buttons-minus.svg +0 -51
- egse/icons/small-buttons-plus.svg +0 -51
- egse/icons/sponge.svg +0 -220
- egse/icons/start-button-disabled.svg +0 -84
- egse/icons/start-button.svg +0 -50
- egse/icons/stop-button-disabled.svg +0 -84
- egse/icons/stop-button.svg +0 -50
- egse/icons/stop-red.svg +0 -17
- egse/icons/stop.svg +0 -1
- egse/icons/switch-disabled-square.svg +0 -87
- egse/icons/switch-disabled.svg +0 -15
- egse/icons/switch-off-square.svg +0 -87
- egse/icons/switch-off.svg +0 -72
- egse/icons/switch-on-square.svg +0 -87
- egse/icons/switch-on.svg +0 -61
- egse/icons/temperature-control.svg +0 -44
- egse/icons/th_ui_logo.svg +0 -1
- egse/icons/unplugged.svg +0 -23
- egse/icons/unvalid.png +0 -0
- egse/icons/user-interface.svg +0 -1
- egse/icons/vacuum.svg +0 -1
- egse/icons/valid.png +0 -0
- egse/icons/zoom-to-pixel-dark.svg +0 -64
- egse/icons/zoom-to-pixel-white.svg +0 -36
- egse/images/big-rotation-stage.png +0 -0
- egse/images/connected-100.png +0 -0
- egse/images/cross.svg +0 -6
- egse/images/disconnected-100.png +0 -0
- egse/images/gui-icon.png +0 -0
- egse/images/home.svg +0 -6
- egse/images/info-icon.png +0 -0
- egse/images/led-black.svg +0 -89
- egse/images/led-green.svg +0 -85
- egse/images/led-orange.svg +0 -85
- egse/images/led-red.svg +0 -85
- egse/images/load-icon.png +0 -0
- egse/images/load-setup.png +0 -0
- egse/images/load.png +0 -0
- egse/images/pause.png +0 -0
- egse/images/play-button.svg +0 -8
- egse/images/play.png +0 -0
- egse/images/process-status.png +0 -0
- egse/images/restart.png +0 -0
- egse/images/search.png +0 -0
- egse/images/sma.png +0 -0
- egse/images/start.png +0 -0
- egse/images/stop-button.svg +0 -8
- egse/images/stop.png +0 -0
- egse/images/switch-off.svg +0 -48
- egse/images/switch-on.svg +0 -48
- egse/images/undo.png +0 -0
- egse/images/update-button.svg +0 -11
- egse/imageviewer/exposureselection.py +0 -475
- egse/imageviewer/imageviewer.py +0 -198
- egse/imageviewer/matchfocalplane.py +0 -179
- egse/imageviewer/subfieldposition.py +0 -133
- egse/lampcontrol/__init__.py +0 -4
- egse/lampcontrol/beaglebone/beaglebone.py +0 -178
- egse/lampcontrol/beaglebone/beaglebone.yaml +0 -62
- egse/lampcontrol/beaglebone/beaglebone_cs.py +0 -106
- egse/lampcontrol/beaglebone/beaglebone_devif.py +0 -150
- egse/lampcontrol/beaglebone/beaglebone_protocol.py +0 -73
- egse/lampcontrol/energetiq/__init__.py +0 -22
- egse/lampcontrol/energetiq/eq99.yaml +0 -98
- egse/lampcontrol/energetiq/lampEQ99.py +0 -283
- egse/lampcontrol/energetiq/lampEQ99_cs.py +0 -128
- egse/lampcontrol/energetiq/lampEQ99_devif.py +0 -158
- egse/lampcontrol/energetiq/lampEQ99_encode_decode_errors.py +0 -73
- egse/lampcontrol/energetiq/lampEQ99_protocol.py +0 -69
- egse/lampcontrol/energetiq/lampEQ99_ui.py +0 -465
- egse/lib/CentOS-7/EtherSpaceLink_v34_86.dylib +0 -0
- egse/lib/CentOS-8/ESL-RMAP_v34_86.dylib +0 -0
- egse/lib/CentOS-8/EtherSpaceLink_v34_86.dylib +0 -0
- egse/lib/Debian/ESL-RMAP_v34_86.dylib +0 -0
- egse/lib/Debian/EtherSpaceLink_v34_86.dylib +0 -0
- egse/lib/Debian/libetherspacelink_v35_21.dylib +0 -0
- egse/lib/Linux/ESL-RMAP_v34_86.dylib +0 -0
- egse/lib/Linux/EtherSpaceLink_v34_86.dylib +0 -0
- egse/lib/Ubuntu-20/ESL-RMAP_v34_86.dylib +0 -0
- egse/lib/Ubuntu-20/EtherSpaceLink_v34_86.dylib +0 -0
- egse/lib/gssw/python3-gssw_2.2.3+31f63c9f-1_all.deb +0 -0
- egse/lib/macOS/ESL-RMAP_v34_86.dylib +0 -0
- egse/lib/macOS/EtherSpaceLink_v34_86.dylib +0 -0
- egse/lib/ximc/__pycache__/pyximc.cpython-38 2.pyc +0 -0
- egse/lib/ximc/__pycache__/pyximc.cpython-38.pyc +0 -0
- egse/lib/ximc/libximc.framework/Frameworks/libbindy.dylib +0 -0
- egse/lib/ximc/libximc.framework/Frameworks/libxiwrapper.dylib +0 -0
- egse/lib/ximc/libximc.framework/Headers/ximc.h +0 -5510
- egse/lib/ximc/libximc.framework/Resources/Info.plist +0 -42
- egse/lib/ximc/libximc.framework/Resources/keyfile.sqlite +0 -0
- egse/lib/ximc/libximc.framework/libbindy.so +0 -0
- egse/lib/ximc/libximc.framework/libximc +0 -0
- egse/lib/ximc/libximc.framework/libximc.so +0 -0
- egse/lib/ximc/libximc.framework/libximc.so.7.0.0 +0 -0
- egse/lib/ximc/libximc.framework/libxiwrapper.so +0 -0
- egse/lib/ximc/pyximc.py +0 -922
- egse/listener.py +0 -73
- egse/logger/__init__.py +0 -243
- egse/logger/log_cs.py +0 -321
- egse/metrics.py +0 -98
- egse/mixin.py +0 -464
- egse/monitoring.py +0 -95
- egse/ni/alarms/__init__.py +0 -26
- egse/ni/alarms/cdaq9375.py +0 -300
- egse/ni/alarms/cdaq9375.yaml +0 -89
- egse/ni/alarms/cdaq9375_cs.py +0 -130
- egse/ni/alarms/cdaq9375_devif.py +0 -183
- egse/ni/alarms/cdaq9375_protocol.py +0 -48
- egse/obs_inspection.py +0 -163
- egse/observer.py +0 -41
- egse/obsid.py +0 -163
- egse/powermeter/__init__.py +0 -0
- egse/powermeter/ni/__init__.py +0 -38
- egse/powermeter/ni/cdaq9184.py +0 -224
- egse/powermeter/ni/cdaq9184.yaml +0 -73
- egse/powermeter/ni/cdaq9184_cs.py +0 -130
- egse/powermeter/ni/cdaq9184_devif.py +0 -201
- egse/powermeter/ni/cdaq9184_protocol.py +0 -48
- egse/powermeter/ni/cdaq9184_ui.py +0 -544
- egse/powermeter/thorlabs/__init__.py +0 -25
- egse/powermeter/thorlabs/pm100a.py +0 -380
- egse/powermeter/thorlabs/pm100a.yaml +0 -132
- egse/powermeter/thorlabs/pm100a_cs.py +0 -136
- egse/powermeter/thorlabs/pm100a_devif.py +0 -127
- egse/powermeter/thorlabs/pm100a_protocol.py +0 -80
- egse/powermeter/thorlabs/pm100a_ui.py +0 -725
- egse/process.py +0 -451
- egse/procman/__init__.py +0 -811
- egse/procman/cannot_start_process_popup.py +0 -43
- egse/procman/procman.yaml +0 -49
- egse/procman/procman_cs.py +0 -201
- egse/procman/procman_ui.py +0 -2081
- egse/protocol.py +0 -603
- egse/proxy.py +0 -522
- egse/randomwalk.py +0 -140
- egse/reg.py +0 -585
- egse/reload.py +0 -122
- egse/reprocess.py +0 -675
- egse/resource.py +0 -333
- egse/rst.py +0 -135
- egse/search.py +0 -182
- egse/serialdevice.py +0 -190
- egse/services.py +0 -212
- egse/services.yaml +0 -51
- egse/settings.py +0 -379
- egse/settings.yaml +0 -980
- egse/setup.py +0 -1180
- egse/shutter/__init__.py +0 -0
- egse/shutter/thorlabs/__init__.py +0 -19
- egse/shutter/thorlabs/ksc101.py +0 -205
- egse/shutter/thorlabs/ksc101.yaml +0 -105
- egse/shutter/thorlabs/ksc101_cs.py +0 -136
- egse/shutter/thorlabs/ksc101_devif.py +0 -201
- egse/shutter/thorlabs/ksc101_protocol.py +0 -69
- egse/shutter/thorlabs/ksc101_ui.py +0 -548
- egse/shutter/thorlabs/sc10.py +0 -82
- egse/shutter/thorlabs/sc10.yaml +0 -52
- egse/shutter/thorlabs/sc10_controller.py +0 -81
- egse/shutter/thorlabs/sc10_cs.py +0 -108
- egse/shutter/thorlabs/sc10_interface.py +0 -25
- egse/shutter/thorlabs/sc10_simulator.py +0 -30
- egse/simulator.py +0 -41
- egse/slack.py +0 -61
- egse/socketdevice.py +0 -218
- egse/sockets.py +0 -218
- egse/spw.py +0 -1479
- egse/stages/__init__.py +0 -12
- egse/stages/aerotech/ensemble.py +0 -247
- egse/stages/aerotech/ensemble.yaml +0 -205
- egse/stages/aerotech/ensemble_controller.py +0 -275
- egse/stages/aerotech/ensemble_cs.py +0 -110
- egse/stages/aerotech/ensemble_interface.py +0 -132
- egse/stages/aerotech/ensemble_parameters.py +0 -433
- egse/stages/aerotech/ensemble_simulator.py +0 -27
- egse/stages/aerotech/mgse_sim.py +0 -193
- egse/stages/arun/smd3.py +0 -111
- egse/stages/arun/smd3.yaml +0 -68
- egse/stages/arun/smd3_controller.py +0 -472
- egse/stages/arun/smd3_cs.py +0 -112
- egse/stages/arun/smd3_interface.py +0 -53
- egse/stages/arun/smd3_simulator.py +0 -27
- egse/stages/arun/smd3_stop.py +0 -16
- egse/stages/huber/__init__.py +0 -49
- egse/stages/huber/smc9300.py +0 -904
- egse/stages/huber/smc9300.yaml +0 -63
- egse/stages/huber/smc9300_cs.py +0 -178
- egse/stages/huber/smc9300_devif.py +0 -345
- egse/stages/huber/smc9300_protocol.py +0 -111
- egse/stages/huber/smc9300_sim.py +0 -547
- egse/stages/huber/smc9300_ui.py +0 -973
- egse/state.py +0 -173
- egse/statemachine.py +0 -274
- egse/storage/__init__.py +0 -1004
- egse/storage/persistence.py +0 -2295
- egse/storage/storage.yaml +0 -72
- egse/storage/storage_cs.py +0 -214
- egse/styles/dark.qss +0 -343
- egse/styles/default.qss +0 -48
- egse/synoptics/__init__.py +0 -412
- egse/synoptics/syn.yaml +0 -9
- egse/synoptics/syn_cs.py +0 -195
- egse/system.py +0 -1408
- egse/tcs/__init__.py +0 -14
- egse/tcs/tcs.py +0 -874
- egse/tcs/tcs.yaml +0 -14
- egse/tcs/tcs_cs.py +0 -202
- egse/tcs/tcs_devif.py +0 -292
- egse/tcs/tcs_protocol.py +0 -177
- egse/tcs/tcs_sim.py +0 -177
- egse/tcs/tcs_ui.py +0 -543
- egse/tdms.py +0 -171
- egse/tempcontrol/__init__.py +0 -23
- egse/tempcontrol/agilent/agilent34970.py +0 -109
- egse/tempcontrol/agilent/agilent34970.yaml +0 -44
- egse/tempcontrol/agilent/agilent34970_cs.py +0 -116
- egse/tempcontrol/agilent/agilent34970_devif.py +0 -182
- egse/tempcontrol/agilent/agilent34970_protocol.py +0 -99
- egse/tempcontrol/agilent/agilent34972.py +0 -111
- egse/tempcontrol/agilent/agilent34972.yaml +0 -44
- egse/tempcontrol/agilent/agilent34972_cs.py +0 -117
- egse/tempcontrol/agilent/agilent34972_devif.py +0 -189
- egse/tempcontrol/agilent/agilent34972_protocol.py +0 -101
- egse/tempcontrol/beaglebone/beaglebone.py +0 -342
- egse/tempcontrol/beaglebone/beaglebone.yaml +0 -110
- egse/tempcontrol/beaglebone/beaglebone_cs.py +0 -117
- egse/tempcontrol/beaglebone/beaglebone_protocol.py +0 -135
- egse/tempcontrol/beaglebone/beaglebone_ui.py +0 -681
- egse/tempcontrol/digalox/digalox.py +0 -107
- egse/tempcontrol/digalox/digalox.yaml +0 -36
- egse/tempcontrol/digalox/digalox_cs.py +0 -112
- egse/tempcontrol/digalox/digalox_protocol.py +0 -55
- egse/tempcontrol/keithley/__init__.py +0 -33
- egse/tempcontrol/keithley/daq6510.py +0 -662
- egse/tempcontrol/keithley/daq6510.yaml +0 -105
- egse/tempcontrol/keithley/daq6510_cs.py +0 -163
- egse/tempcontrol/keithley/daq6510_devif.py +0 -343
- egse/tempcontrol/keithley/daq6510_protocol.py +0 -78
- egse/tempcontrol/keithley/daq6510_sim.py +0 -186
- egse/tempcontrol/lakeshore/__init__.py +0 -33
- egse/tempcontrol/lakeshore/lsci.py +0 -361
- egse/tempcontrol/lakeshore/lsci.yaml +0 -162
- egse/tempcontrol/lakeshore/lsci_cs.py +0 -174
- egse/tempcontrol/lakeshore/lsci_devif.py +0 -292
- egse/tempcontrol/lakeshore/lsci_protocol.py +0 -73
- egse/tempcontrol/lakeshore/lsci_ui.py +0 -389
- egse/tempcontrol/ni/__init__.py +0 -0
- egse/tempcontrol/spid/spid.py +0 -109
- egse/tempcontrol/spid/spid.yaml +0 -81
- egse/tempcontrol/spid/spid_controller.py +0 -279
- egse/tempcontrol/spid/spid_cs.py +0 -136
- egse/tempcontrol/spid/spid_protocol.py +0 -107
- egse/tempcontrol/spid/spid_ui.py +0 -727
- egse/tempcontrol/srs/__init__.py +0 -22
- egse/tempcontrol/srs/ptc10.py +0 -875
- egse/tempcontrol/srs/ptc10.yaml +0 -227
- egse/tempcontrol/srs/ptc10_cs.py +0 -128
- egse/tempcontrol/srs/ptc10_devif.py +0 -118
- egse/tempcontrol/srs/ptc10_protocol.py +0 -42
- egse/tempcontrol/srs/ptc10_ui.py +0 -906
- egse/ups/apc/apc.py +0 -236
- egse/ups/apc/apc.yaml +0 -45
- egse/ups/apc/apc_cs.py +0 -101
- egse/ups/apc/apc_protocol.py +0 -125
- egse/user.yaml +0 -7
- egse/vacuum/beaglebone/beaglebone.py +0 -149
- egse/vacuum/beaglebone/beaglebone.yaml +0 -44
- egse/vacuum/beaglebone/beaglebone_cs.py +0 -108
- egse/vacuum/beaglebone/beaglebone_devif.py +0 -164
- egse/vacuum/beaglebone/beaglebone_protocol.py +0 -193
- egse/vacuum/beaglebone/beaglebone_ui.py +0 -638
- egse/vacuum/instrutech/igm402.py +0 -92
- egse/vacuum/instrutech/igm402.yaml +0 -90
- egse/vacuum/instrutech/igm402_controller.py +0 -128
- egse/vacuum/instrutech/igm402_cs.py +0 -108
- egse/vacuum/instrutech/igm402_interface.py +0 -49
- egse/vacuum/instrutech/igm402_simulator.py +0 -36
- egse/vacuum/keller/kellerBus.py +0 -256
- egse/vacuum/keller/leo3.py +0 -102
- egse/vacuum/keller/leo3.yaml +0 -38
- egse/vacuum/keller/leo3_controller.py +0 -83
- egse/vacuum/keller/leo3_cs.py +0 -101
- egse/vacuum/keller/leo3_interface.py +0 -33
- egse/vacuum/mks/evision.py +0 -86
- egse/vacuum/mks/evision.yaml +0 -75
- egse/vacuum/mks/evision_cs.py +0 -101
- egse/vacuum/mks/evision_devif.py +0 -316
- egse/vacuum/mks/evision_interface.py +0 -60
- egse/vacuum/mks/evision_simulator.py +0 -24
- egse/vacuum/mks/evision_ui.py +0 -704
- egse/vacuum/pfeiffer/acp40.py +0 -87
- egse/vacuum/pfeiffer/acp40.yaml +0 -60
- egse/vacuum/pfeiffer/acp40_controller.py +0 -117
- egse/vacuum/pfeiffer/acp40_cs.py +0 -109
- egse/vacuum/pfeiffer/acp40_interface.py +0 -40
- egse/vacuum/pfeiffer/acp40_simulator.py +0 -39
- egse/vacuum/pfeiffer/tc400.py +0 -113
- egse/vacuum/pfeiffer/tc400.yaml +0 -83
- egse/vacuum/pfeiffer/tc400_controller.py +0 -140
- egse/vacuum/pfeiffer/tc400_cs.py +0 -109
- egse/vacuum/pfeiffer/tc400_interface.py +0 -70
- egse/vacuum/pfeiffer/tc400_simulator.py +0 -24
- egse/vacuum/pfeiffer/tpg261.py +0 -81
- egse/vacuum/pfeiffer/tpg261.yaml +0 -66
- egse/vacuum/pfeiffer/tpg261_controller.py +0 -150
- egse/vacuum/pfeiffer/tpg261_cs.py +0 -109
- egse/vacuum/pfeiffer/tpg261_interface.py +0 -60
- egse/vacuum/pfeiffer/tpg261_simulator.py +0 -24
- egse/version.py +0 -174
- egse/visitedpositions.py +0 -398
- egse/windowing.py +0 -213
- egse/zmq/__init__.py +0 -28
- egse/zmq/spw.py +0 -160
- egse/zmq_ser.py +0 -41
- scripts/alerts/cold.yaml +0 -278
- scripts/alerts/example_alerts.yaml +0 -54
- scripts/alerts/transition.yaml +0 -14
- scripts/alerts/warm.yaml +0 -49
- scripts/analyse_n_fee_hk_data.py +0 -44
- scripts/check_hdf5_files.py +0 -192
- scripts/check_register_sync.py +0 -47
- scripts/create_hdf5_report.py +0 -295
- scripts/csl_model.py +0 -436
- scripts/csl_restore_setup.py +0 -230
- scripts/export-grafana-dashboards.py +0 -50
- scripts/fdir/cs_recovery/fdir_cs_recovery.py +0 -59
- scripts/fdir/fdir_table.yaml +0 -70
- scripts/fdir/fdir_test_recovery.py +0 -11
- scripts/fdir/hw_recovery/fdir_agilent_hw_recovery.py +0 -73
- scripts/fdir/limit_recovery/fdir_agilent_limit.py +0 -64
- scripts/fdir/limit_recovery/fdir_bb_heater_limit.py +0 -61
- scripts/fdir/limit_recovery/fdir_ensemble_limit.py +0 -33
- scripts/fdir/limit_recovery/fdir_pressure_limit_recovery.py +0 -71
- scripts/fix_csv.py +0 -80
- scripts/n_fee_supply_voltage_calculation.py +0 -92
- scripts/playground.py +0 -30
- scripts/print_hdf5_hk_data.py +0 -68
- scripts/print_register_map.py +0 -43
- scripts/sron/commanding/control_heaters.py +0 -44
- scripts/sron/commanding/pumpdown.py +0 -46
- scripts/sron/commanding/set_pid_setpoint.py +0 -19
- scripts/sron/commanding/shutdown_bbb_heaters.py +0 -10
- scripts/sron/commanding/shutdown_pumps.py +0 -33
- scripts/sron/tm_gen/tm_gen_agilent.py +0 -38
- scripts/sron/tm_gen/tm_gen_heaters.py +0 -4
- scripts/sron/tm_gen/tm_gen_spid.py +0 -13
- scripts/update_operational_cgse.py +0 -268
- scripts/update_operational_cgse_old.py +0 -273
egse/dpu/__init__.py
DELETED
|
@@ -1,2681 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
This module defines the commanding interfaces for the DPU—N-FEE interaction.
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
On the *client/user side*, the `DPUProxy` class shall be used for user interactions and commanding with both
|
|
6
|
-
the DPU simulator and the N-FEE. This class connects to the DPU control server which must be running before
|
|
7
|
-
any commands can be processed.
|
|
8
|
-
|
|
9
|
-
On the *server side*, the `DPUControlServer` class is located in the module `dpu_cs`.
|
|
10
|
-
|
|
11
|
-
The top-level classes that are of interest to the developer when inspecting this module are:
|
|
12
|
-
|
|
13
|
-
* `DPUController` which puts the requested commands on the command queue for the `DPUProcessor`
|
|
14
|
-
* `DPUProcessor` is the work horse of the DPU simulator and runs in a separate process. This process communicates
|
|
15
|
-
directly with the N-FEE through the SpaceWire interface.
|
|
16
|
-
* `DPUInternals` keeps critical information about the DPU and provides information on the status of the
|
|
17
|
-
readout progress, i.e. where we are in the readout cycle.
|
|
18
|
-
* `DPUMonitoring` provides methods to run a function at a certain time, e.g. right after a long pulse, or to wait
|
|
19
|
-
for an event, e.g. when an HDF5 file is ready for processing.
|
|
20
|
-
|
|
21
|
-
The actual commanding is done in `dpu` module. That module also defines the `NFEEState` which acts as a
|
|
22
|
-
mirror of the FPGA status.
|
|
23
|
-
|
|
24
|
-
This module also defines a number of functions, but they all are for internal use and are not of any interest
|
|
25
|
-
unless you are maintaining this module.
|
|
26
|
-
|
|
27
|
-
"""
|
|
28
|
-
|
|
29
|
-
import logging
|
|
30
|
-
import multiprocessing
|
|
31
|
-
import pickle
|
|
32
|
-
import queue
|
|
33
|
-
from enum import Enum
|
|
34
|
-
|
|
35
|
-
import time
|
|
36
|
-
import traceback
|
|
37
|
-
from dataclasses import dataclass
|
|
38
|
-
from pathlib import Path
|
|
39
|
-
from typing import Any
|
|
40
|
-
from typing import Callable
|
|
41
|
-
from typing import Dict
|
|
42
|
-
from typing import List
|
|
43
|
-
from typing import Mapping
|
|
44
|
-
from typing import Tuple
|
|
45
|
-
from typing import Type
|
|
46
|
-
|
|
47
|
-
import zmq
|
|
48
|
-
|
|
49
|
-
from egse.command import ClientServerCommand
|
|
50
|
-
from egse.confman import ConfigurationManagerProxy
|
|
51
|
-
from egse.control import ControlServer
|
|
52
|
-
from egse.decorators import dynamic_interface
|
|
53
|
-
from egse.dpu.dpu import NFEEState, command_set_nfee_fpga_defaults
|
|
54
|
-
from egse.dpu.dpu import command_external_clock
|
|
55
|
-
from egse.dpu.dpu import command_get_hk_information
|
|
56
|
-
from egse.dpu.dpu import command_get_mode
|
|
57
|
-
from egse.dpu.dpu import command_internal_clock
|
|
58
|
-
from egse.dpu.dpu import command_reset
|
|
59
|
-
from egse.dpu.dpu import command_set_charge_injection
|
|
60
|
-
from egse.dpu.dpu import command_set_clear_error_flags
|
|
61
|
-
from egse.dpu.dpu import command_set_readout_order
|
|
62
|
-
from egse.dpu.dpu import command_set_dump_mode
|
|
63
|
-
from egse.dpu.dpu import command_set_dump_mode_int_sync
|
|
64
|
-
from egse.dpu.dpu import command_set_full_image_mode
|
|
65
|
-
from egse.dpu.dpu import command_set_full_image_mode_int_sync
|
|
66
|
-
from egse.dpu.dpu import command_set_full_image_pattern_mode
|
|
67
|
-
from egse.dpu.dpu import command_set_high_precision_hk_mode
|
|
68
|
-
from egse.dpu.dpu import command_set_immediate_on_mode
|
|
69
|
-
from egse.dpu.dpu import command_set_on_mode
|
|
70
|
-
from egse.dpu.dpu import command_set_register_value
|
|
71
|
-
from egse.dpu.dpu import command_set_reverse_clocking
|
|
72
|
-
from egse.dpu.dpu import command_set_standby_mode
|
|
73
|
-
from egse.dpu.dpu import command_set_vgd
|
|
74
|
-
from egse.dpu.dpu import command_sync_register_map
|
|
75
|
-
from egse.dpu.dpu import prio_command_get_mode
|
|
76
|
-
from egse.dpu.dpu import prio_command_get_register_map
|
|
77
|
-
from egse.dpu.dpu import prio_command_get_slicing
|
|
78
|
-
from egse.dpu.dpu import prio_command_get_sync_mode
|
|
79
|
-
from egse.dpu.dpu import prio_command_is_dump_mode
|
|
80
|
-
from egse.dpu.dpu import prio_command_set_slicing
|
|
81
|
-
from egse.dsi.esl import is_timecode
|
|
82
|
-
from egse.exceptions import Abort
|
|
83
|
-
from egse.fee import is_hk_data_packet
|
|
84
|
-
from egse.fee import n_fee_mode
|
|
85
|
-
from egse.fee.nfee import HousekeepingData
|
|
86
|
-
from egse.obsid import ObservationIdentifier
|
|
87
|
-
from egse.protocol import CommandProtocol
|
|
88
|
-
from egse.proxy import Proxy
|
|
89
|
-
from egse.reg import RegisterMap
|
|
90
|
-
from egse.settings import Settings
|
|
91
|
-
from egse.setup import SetupError
|
|
92
|
-
from egse.spw import DataDataPacket
|
|
93
|
-
from egse.spw import DataPacket
|
|
94
|
-
from egse.spw import DataPacketType
|
|
95
|
-
from egse.spw import HousekeepingPacket
|
|
96
|
-
from egse.spw import OverscanDataPacket
|
|
97
|
-
from egse.spw import SpaceWireInterface
|
|
98
|
-
from egse.spw import SpaceWirePacket
|
|
99
|
-
from egse.spw import TimecodePacket
|
|
100
|
-
from egse.spw import to_string
|
|
101
|
-
from egse.state import GlobalState
|
|
102
|
-
from egse.storage import StorageProxy
|
|
103
|
-
from egse.storage.persistence import FITS
|
|
104
|
-
from egse.storage.persistence import HDF5
|
|
105
|
-
from egse.storage.persistence import PersistenceLayer
|
|
106
|
-
from egse.system import SignalCatcher
|
|
107
|
-
from egse.system import Timer
|
|
108
|
-
from egse.system import format_datetime
|
|
109
|
-
from egse.system import wait_until
|
|
110
|
-
from egse.zmq import MessageIdentifier
|
|
111
|
-
from egse.zmq_ser import bind_address
|
|
112
|
-
from egse.zmq_ser import connect_address
|
|
113
|
-
|
|
114
|
-
LOGGER = logging.getLogger(__name__)
|
|
115
|
-
|
|
116
|
-
CTRL_SETTINGS = Settings.load("DPU Control Server")
|
|
117
|
-
N_FEE_SETTINGS = Settings.load("N-FEE")
|
|
118
|
-
COMMAND_SETTINGS = Settings.load(filename="dpu.yaml")
|
|
119
|
-
DEVICE_SETTINGS = Settings.load(filename="dpu.yaml")
|
|
120
|
-
|
|
121
|
-
CRUCIAL_REGISTER_PARAMETERS = (
|
|
122
|
-
"ccd_readout_order", "sensor_sel", "v_start", "v_end", "h_end", "ccd_mode_config"
|
|
123
|
-
)
|
|
124
|
-
|
|
125
|
-
DATA_TYPE: Dict[str, Type[PersistenceLayer]] = {
|
|
126
|
-
"HDF5": HDF5,
|
|
127
|
-
"FITS": FITS,
|
|
128
|
-
}
|
|
129
|
-
|
|
130
|
-
CCD_NUMBERS = [1, 2, 3, 4]
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
def rotate_list(seq, n):
|
|
134
|
-
if (size := len(seq)) < 2:
|
|
135
|
-
return seq
|
|
136
|
-
n = n % len(seq)
|
|
137
|
-
return seq[n:] + seq[:n]
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
def _get_ccd_readout_order(order_list: list, ccd_id_mapping: list):
|
|
141
|
-
return sum(ccd_id_mapping[ccd] << idx * 2 for idx, ccd in enumerate(order_list))
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
class NoDataPacketError(Exception):
|
|
145
|
-
"""Raised when the expected data packet turns out to be something else."""
|
|
146
|
-
pass
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
class NoHousekeepingPacketError(Exception):
|
|
150
|
-
"""Raised when the expected Housekeeping packet turns out to be something else."""
|
|
151
|
-
pass
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
class NoTimeCodeError(Exception):
|
|
155
|
-
"""Raised when the expected Timecode packet turns out to be something else."""
|
|
156
|
-
pass
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
class NoBytesReceivedError(Exception):
|
|
160
|
-
"""Raised when the zero or one bytes were received."""
|
|
161
|
-
pass
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
class TimecodeTimeoutError(Exception):
|
|
165
|
-
"""Raised when the read_packet times out while waiting for a timecode."""
|
|
166
|
-
pass
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
class TimeExceededError(Exception):
|
|
170
|
-
"""Raised when retrieving the data packets from the N-FEE takes too long."""
|
|
171
|
-
pass
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
class NFEECommandError(Exception):
|
|
175
|
-
"""Raised when sending a command to the N-FEE failed."""
|
|
176
|
-
pass
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
class DPUInterface:
|
|
180
|
-
"""
|
|
181
|
-
This interface is for sending commands to the DPU Control Server. The commands are user
|
|
182
|
-
oriented and will be translated by the DPU Controller in proper FEE commands.
|
|
183
|
-
|
|
184
|
-
The interface should be implemented by the `DPUController` and the `DPUProxy` (and possibly
|
|
185
|
-
a `DPUSimulator` should we need that e.g. for testing purposes).
|
|
186
|
-
|
|
187
|
-
The command shall also be added to the `dpu.yaml` command definitions file.
|
|
188
|
-
"""
|
|
189
|
-
|
|
190
|
-
@dynamic_interface
|
|
191
|
-
def marker(self, mark: str):
|
|
192
|
-
raise NotImplementedError
|
|
193
|
-
|
|
194
|
-
@dynamic_interface
|
|
195
|
-
def get_slicing(self) -> int:
|
|
196
|
-
raise NotImplementedError
|
|
197
|
-
|
|
198
|
-
@dynamic_interface
|
|
199
|
-
def set_slicing(self, num_cycles: int):
|
|
200
|
-
raise NotImplementedError
|
|
201
|
-
|
|
202
|
-
@dynamic_interface
|
|
203
|
-
def is_simulator(self):
|
|
204
|
-
raise NotImplementedError
|
|
205
|
-
|
|
206
|
-
@dynamic_interface
|
|
207
|
-
def get_register_map(self) -> RegisterMap:
|
|
208
|
-
"""
|
|
209
|
-
Returns the RegisterMap
|
|
210
|
-
"""
|
|
211
|
-
raise NotImplementedError
|
|
212
|
-
|
|
213
|
-
@dynamic_interface
|
|
214
|
-
def n_fee_sync_register_map(self):
|
|
215
|
-
"""
|
|
216
|
-
Read the complete register map from the N-FEE.
|
|
217
|
-
"""
|
|
218
|
-
raise NotImplementedError
|
|
219
|
-
|
|
220
|
-
@dynamic_interface
|
|
221
|
-
def n_fee_get_mode(self):
|
|
222
|
-
"""
|
|
223
|
-
Returns the N-FEE mode.
|
|
224
|
-
"""
|
|
225
|
-
raise NotImplementedError
|
|
226
|
-
|
|
227
|
-
@dynamic_interface
|
|
228
|
-
def n_fee_get_sync_mode(self):
|
|
229
|
-
"""
|
|
230
|
-
Returns the N-FEE mode.
|
|
231
|
-
"""
|
|
232
|
-
raise NotImplementedError
|
|
233
|
-
|
|
234
|
-
@dynamic_interface
|
|
235
|
-
def n_fee_set_on_mode(self):
|
|
236
|
-
"""Command the N-FEE to go into ON mode."""
|
|
237
|
-
raise NotImplementedError
|
|
238
|
-
|
|
239
|
-
@dynamic_interface
|
|
240
|
-
def n_fee_is_dump_mode(self):
|
|
241
|
-
"""
|
|
242
|
-
Returns True if the N-FEE is configured for DUMP mode.
|
|
243
|
-
|
|
244
|
-
DUMP mode is not really an N-FEE mode, but more a set of register settings that allow to
|
|
245
|
-
readout/dump all CCDs without transmitting any data. This mode is used in ambient to make
|
|
246
|
-
sure the detectors do not get saturated between tests.
|
|
247
|
-
"""
|
|
248
|
-
raise NotImplementedError
|
|
249
|
-
|
|
250
|
-
@dynamic_interface
|
|
251
|
-
def n_fee_set_immediate_on_mode(self):
|
|
252
|
-
"""Command the N-FEE to go into ON mode."""
|
|
253
|
-
raise NotImplementedError
|
|
254
|
-
|
|
255
|
-
@dynamic_interface
|
|
256
|
-
def n_fee_set_standby_mode(self):
|
|
257
|
-
"""Command the N-FEE to go into STANDBY mode."""
|
|
258
|
-
raise NotImplementedError
|
|
259
|
-
|
|
260
|
-
@dynamic_interface
|
|
261
|
-
def n_fee_set_dump_mode(self, n_fee_parameters: dict):
|
|
262
|
-
""" Command the N-FEE to go into DUMP mode.
|
|
263
|
-
|
|
264
|
-
n_fee_parameters:
|
|
265
|
-
|
|
266
|
-
The n_fee_parameters argument is a dictionary with additional/specific parameters to
|
|
267
|
-
set in the register when moving to full image pattern mode.
|
|
268
|
-
|
|
269
|
-
* num_cycles (int): the number of readout cycles
|
|
270
|
-
* v_start (int): the first line to readout
|
|
271
|
-
* v_end (int): the last line to readout
|
|
272
|
-
* sensor_sel (int): the CCD side to select for transfer
|
|
273
|
-
|
|
274
|
-
Args:
|
|
275
|
-
n_fee_parameters (dict): dictionary with N-FEE parameters to be set_
|
|
276
|
-
"""
|
|
277
|
-
raise NotImplementedError
|
|
278
|
-
|
|
279
|
-
@dynamic_interface
|
|
280
|
-
def n_fee_set_dump_mode_int_sync(self, n_fee_parameters: dict):
|
|
281
|
-
""" Command the N-FEE to go into DUMP mode and internal sync.
|
|
282
|
-
|
|
283
|
-
The n_fee_parameters argument is a dictionary with additional/specific parameters to
|
|
284
|
-
set in the register when moving to full image pattern mode.
|
|
285
|
-
|
|
286
|
-
* num_cycles (int): the number of readout cycles
|
|
287
|
-
* v_start (int): the first line to readout
|
|
288
|
-
* v_end (int): the last line to readout
|
|
289
|
-
* sensor_sel (int): the CCD side to select for transfer
|
|
290
|
-
* int_sync_period (int): the period of the internal sync in milliseconds
|
|
291
|
-
|
|
292
|
-
Args:
|
|
293
|
-
n_fee_parameters (dict): dictionary with N-FEE parameters to be set_
|
|
294
|
-
"""
|
|
295
|
-
raise NotImplementedError
|
|
296
|
-
|
|
297
|
-
@dynamic_interface
|
|
298
|
-
def n_fee_set_full_image_mode(self, n_fee_parameters):
|
|
299
|
-
"""
|
|
300
|
-
Command the N-FEE to go into FULL_IMAGE mode.
|
|
301
|
-
|
|
302
|
-
n_fee_parameters:
|
|
303
|
-
|
|
304
|
-
The n_fee_parameters argument is a dictionary with additional/specific parameters to
|
|
305
|
-
set in the register when moving to full image pattern mode.
|
|
306
|
-
|
|
307
|
-
* num_cycles (int): the number of readout cycles
|
|
308
|
-
* v_start (int): the first line to readout
|
|
309
|
-
* v_end (int): the last line to readout
|
|
310
|
-
* sensor_sel (int): the CCD side to select for transfer
|
|
311
|
-
|
|
312
|
-
Args:
|
|
313
|
-
n_fee_parameters (dict): dictionary with N-FEE parameters to be set
|
|
314
|
-
"""
|
|
315
|
-
raise NotImplementedError
|
|
316
|
-
|
|
317
|
-
@dynamic_interface
|
|
318
|
-
def n_fee_set_full_image_mode_int_sync(self, n_fee_parameters):
|
|
319
|
-
"""
|
|
320
|
-
Command the N-FEE to go into FULL_IMAGE mode and internal sync.
|
|
321
|
-
|
|
322
|
-
n_fee_parameters:
|
|
323
|
-
|
|
324
|
-
The n_fee_parameters argument is a dictionary with additional/specific parameters to
|
|
325
|
-
set in the register when moving to full image pattern mode.
|
|
326
|
-
|
|
327
|
-
* num_cycles (int): the number of readout cycles
|
|
328
|
-
* v_start (int): the first line to readout
|
|
329
|
-
* v_end (int): the last line to readout
|
|
330
|
-
* sensor_sel (int): the CCD side to select for transfer
|
|
331
|
-
|
|
332
|
-
Args:
|
|
333
|
-
n_fee_parameters (dict): dictionary with N-FEE parameters to be set
|
|
334
|
-
"""
|
|
335
|
-
raise NotImplementedError
|
|
336
|
-
|
|
337
|
-
@dynamic_interface
|
|
338
|
-
def n_fee_set_full_image_pattern_mode(self, n_fee_parameters):
|
|
339
|
-
"""
|
|
340
|
-
Command the N-FEE to go into FULL_IMAGE_PATTERN mode.
|
|
341
|
-
|
|
342
|
-
n_fee_parameters:
|
|
343
|
-
|
|
344
|
-
The n_fee_parameters argument is a dictionary with additional/specific parameters to
|
|
345
|
-
set in the register when moving to full image pattern mode.
|
|
346
|
-
|
|
347
|
-
* num_cycles (int): the number of readout cycles
|
|
348
|
-
* v_start (int): the first line to readout
|
|
349
|
-
* v_end (int): the last line to readout
|
|
350
|
-
* sensor_sel (int): the CCD side to select for transfer
|
|
351
|
-
|
|
352
|
-
Args:
|
|
353
|
-
n_fee_parameters (dict): dictionary with N-FEE parameters to be set
|
|
354
|
-
"""
|
|
355
|
-
raise NotImplementedError
|
|
356
|
-
|
|
357
|
-
@dynamic_interface
|
|
358
|
-
def n_fee_high_precision_hk_mode(self, n_fee_parameters: dict):
|
|
359
|
-
"""Command the N-FEE to go into high precision housekeeping mode."""
|
|
360
|
-
raise NotImplementedError
|
|
361
|
-
|
|
362
|
-
@dynamic_interface
|
|
363
|
-
def n_fee_set_internal_sync(self, n_fee_parameters: dict):
|
|
364
|
-
"""
|
|
365
|
-
Command the N-FEE to go into internal sync mode.
|
|
366
|
-
|
|
367
|
-
The method expects the following keys in n_fee_parameters:
|
|
368
|
-
|
|
369
|
-
* int_sync_period: the internal sync period in milliseconds [default=6250]
|
|
370
|
-
|
|
371
|
-
Args:
|
|
372
|
-
n_fee_parameters (dict): N-FEE parameter dictionary
|
|
373
|
-
"""
|
|
374
|
-
raise NotImplementedError
|
|
375
|
-
|
|
376
|
-
@dynamic_interface
|
|
377
|
-
def n_fee_set_external_sync(self, n_fee_parameters: dict):
|
|
378
|
-
"""
|
|
379
|
-
Command the N-FEE to go into external sync mode.
|
|
380
|
-
No keys are expected in n_fee_parameters, pass an empty dict.
|
|
381
|
-
"""
|
|
382
|
-
raise NotImplementedError
|
|
383
|
-
|
|
384
|
-
@dynamic_interface
|
|
385
|
-
def n_fee_set_register_value(self, reg_name: str, field_name: str, field_value: int):
|
|
386
|
-
"""Command the N-FEE to set a register value."""
|
|
387
|
-
raise NotImplementedError
|
|
388
|
-
|
|
389
|
-
@dynamic_interface
|
|
390
|
-
def n_fee_reset(self):
|
|
391
|
-
"""Command the N-FEE to reset to its default settings."""
|
|
392
|
-
raise NotImplementedError
|
|
393
|
-
|
|
394
|
-
@dynamic_interface
|
|
395
|
-
def n_fee_set_clear_error_flags(self):
|
|
396
|
-
"""
|
|
397
|
-
Command the N-FEE to clear all error flags for non RMAP/SpW related functions immediately.
|
|
398
|
-
|
|
399
|
-
The `clear_error_flag` bit in the register map is set to 1, meaning that all error flags
|
|
400
|
-
that are generated by the N-FEE FPGA for non RMAP-SpW related functions are cleared
|
|
401
|
-
immediately. This bit is cleared automatically, so that any future error flags can be
|
|
402
|
-
latched again. If the error conditions persist and no corrective measures are taken,
|
|
403
|
-
then error flags would be set again.
|
|
404
|
-
"""
|
|
405
|
-
|
|
406
|
-
raise NotImplementedError
|
|
407
|
-
|
|
408
|
-
@dynamic_interface
|
|
409
|
-
def n_fee_set_reverse_clocking(self, n_fee_parameters: dict):
|
|
410
|
-
|
|
411
|
-
raise NotImplementedError
|
|
412
|
-
|
|
413
|
-
@dynamic_interface
|
|
414
|
-
def n_fee_set_charge_injection(self, n_fee_parameters: dict):
|
|
415
|
-
raise NotImplementedError
|
|
416
|
-
|
|
417
|
-
@dynamic_interface
|
|
418
|
-
def n_fee_set_vgd(self, n_fee_parameters: dict):
|
|
419
|
-
raise NotImplementedError
|
|
420
|
-
|
|
421
|
-
@dynamic_interface
|
|
422
|
-
def n_fee_set_fpga_defaults(self):
|
|
423
|
-
raise NotImplementedError
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
class DPUSimulator(DPUInterface):
|
|
427
|
-
# The DPUSimulator will stand by itself, which means it will not send commands to an FEE
|
|
428
|
-
# nor will it request data or HK from the FEE. The methods in this implementation will return
|
|
429
|
-
# a fake set of data.
|
|
430
|
-
|
|
431
|
-
def n_fee_get_mode(self):
|
|
432
|
-
return n_fee_mode.STAND_BY_MODE
|
|
433
|
-
|
|
434
|
-
def n_fee_get_sync_mode(self):
|
|
435
|
-
return NotImplemented
|
|
436
|
-
|
|
437
|
-
def n_fee_set_on_mode(self):
|
|
438
|
-
pass
|
|
439
|
-
|
|
440
|
-
def n_fee_set_standby_mode(self):
|
|
441
|
-
pass
|
|
442
|
-
|
|
443
|
-
def n_fee_set_dump_mode(self, n_fee_parameters: dict):
|
|
444
|
-
pass
|
|
445
|
-
|
|
446
|
-
def n_fee_set_full_image_mode(self, n_fee_parameters):
|
|
447
|
-
import pprint
|
|
448
|
-
|
|
449
|
-
LOGGER.debug(f"called: n_fee_set_full_image_mode({pprint.pformat(n_fee_parameters)})")
|
|
450
|
-
|
|
451
|
-
def n_fee_set_full_image_mode_int_sync(self, n_fee_parameters):
|
|
452
|
-
import pprint
|
|
453
|
-
|
|
454
|
-
LOGGER.debug(f"called: n_fee_set_full_image_mode_int_sync({pprint.pformat(n_fee_parameters)})")
|
|
455
|
-
|
|
456
|
-
def n_fee_set_full_image_pattern_mode(self):
|
|
457
|
-
pass
|
|
458
|
-
|
|
459
|
-
def n_fee_high_precision_hk_mode(self, n_fee_parameters: dict):
|
|
460
|
-
pass
|
|
461
|
-
|
|
462
|
-
def n_fee_set_internal_sync(self, n_fee_parameters: dict):
|
|
463
|
-
pass
|
|
464
|
-
|
|
465
|
-
def n_fee_set_external_sync(self):
|
|
466
|
-
pass
|
|
467
|
-
|
|
468
|
-
def n_fee_set_clear_error_flags(self):
|
|
469
|
-
pass
|
|
470
|
-
|
|
471
|
-
def n_fee_set_fpga_defaults(self):
|
|
472
|
-
pass
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
class DPUController(DPUInterface):
|
|
476
|
-
"""
|
|
477
|
-
The DPU Controller puts commands on the command queue for processing by the DPU Processor.
|
|
478
|
-
Any response from the DPU Processor will be available on the response queue as soon as the
|
|
479
|
-
command has been executed. The DPU Processor is a separate process that is started by the
|
|
480
|
-
DPU Command Protocol.
|
|
481
|
-
"""
|
|
482
|
-
|
|
483
|
-
def __init__(self,
|
|
484
|
-
priority_queue: multiprocessing.Queue,
|
|
485
|
-
command_queue: multiprocessing.Queue,
|
|
486
|
-
response_queue: multiprocessing.Queue):
|
|
487
|
-
self._priority_q = priority_queue
|
|
488
|
-
self._command_q = command_queue
|
|
489
|
-
self._response_q = response_queue
|
|
490
|
-
|
|
491
|
-
try:
|
|
492
|
-
self.default_ccd_readout_order = GlobalState.setup.camera.fee.ccd_numbering.DEFAULT_CCD_READOUT_ORDER
|
|
493
|
-
self.sensor_sel_both_sides = GlobalState.setup.camera.fee.sensor_sel.enum.BOTH_SIDES.value
|
|
494
|
-
except AttributeError:
|
|
495
|
-
raise SetupError("No entry in the setup for camera.fee.ccd_numbering.DEFAULT_CCD_READOUT_ORDER")
|
|
496
|
-
|
|
497
|
-
def marker(self, mark: str):
|
|
498
|
-
LOGGER.info(f"{mark = }")
|
|
499
|
-
|
|
500
|
-
def get_slicing(self) -> int:
|
|
501
|
-
self._priority_q.put((prio_command_get_slicing, []))
|
|
502
|
-
LOGGER.debug("Controller.get_slicing: Put prio_command_get_slicing on the Queue.")
|
|
503
|
-
(cmd, response) = self._response_q.get()
|
|
504
|
-
LOGGER.debug(f"Controller.get_slicing returned: ({cmd.__name__}, {response}).")
|
|
505
|
-
return response
|
|
506
|
-
|
|
507
|
-
def set_slicing(self, num_cycles: int):
|
|
508
|
-
self._priority_q.put((prio_command_set_slicing, [num_cycles]))
|
|
509
|
-
LOGGER.debug(
|
|
510
|
-
"Controller.set_slicing: Put prio_command_set_slicing on the Queue."
|
|
511
|
-
)
|
|
512
|
-
(cmd, response) = self._response_q.get()
|
|
513
|
-
LOGGER.debug(f"Controller.set_slicing returned: ({cmd.__name__}, {response}).")
|
|
514
|
-
return response
|
|
515
|
-
|
|
516
|
-
def is_simulator(self):
|
|
517
|
-
return True
|
|
518
|
-
|
|
519
|
-
def n_fee_sync_register_map(self) -> RegisterMap:
|
|
520
|
-
self._command_q.put((command_sync_register_map, [], {}))
|
|
521
|
-
LOGGER.debug(
|
|
522
|
-
"Controller.n_fee_sync_register_map: Put command_sync_register_map on the Queue."
|
|
523
|
-
)
|
|
524
|
-
(cmd, response) = self._response_q.get()
|
|
525
|
-
LOGGER.debug(f"Controller.n_fee_sync_register_map returned: ({cmd.__name__}, {response}).")
|
|
526
|
-
return response
|
|
527
|
-
|
|
528
|
-
def get_register_map(self) -> RegisterMap:
|
|
529
|
-
self._priority_q.put((prio_command_get_register_map, []))
|
|
530
|
-
LOGGER.debug("Controller.get_register_map: Put prio_command_get_register_map on the Queue.")
|
|
531
|
-
(cmd, response) = self._response_q.get()
|
|
532
|
-
LOGGER.debug(f"Controller.get_register_map returned: ({cmd.__name__}, {response}).")
|
|
533
|
-
return response
|
|
534
|
-
|
|
535
|
-
def n_fee_get_mode(self):
|
|
536
|
-
self._priority_q.put((prio_command_get_mode, []))
|
|
537
|
-
LOGGER.debug("Controller.n_fee_get_mode: Put prio_command_get_mode on the Queue.")
|
|
538
|
-
(cmd, response) = self._response_q.get()
|
|
539
|
-
LOGGER.debug(f"Controller.n_fee_get_mode returned: ({cmd.__name__}, {response}).")
|
|
540
|
-
return response
|
|
541
|
-
|
|
542
|
-
def n_fee_get_sync_mode(self):
|
|
543
|
-
self._priority_q.put((prio_command_get_sync_mode, []))
|
|
544
|
-
LOGGER.debug("Controller.n_fee_get_sync_mode: Put prio_command_get_sync_mode on the Queue.")
|
|
545
|
-
(cmd, response) = self._response_q.get()
|
|
546
|
-
LOGGER.debug(f"Controller.n_fee_get_sync_mode returned: ({cmd.__name__}, {response}).")
|
|
547
|
-
return response
|
|
548
|
-
|
|
549
|
-
def n_fee_is_dump_mode(self):
|
|
550
|
-
self._priority_q.put((prio_command_is_dump_mode, []))
|
|
551
|
-
LOGGER.debug("Controller.n_fee_is_dump_mode: Put prio_command_is_dump_mode on the Queue.")
|
|
552
|
-
(cmd, response) = self._response_q.get()
|
|
553
|
-
LOGGER.debug(f"Controller.n_fee_is_dump_mode returned: ({cmd.__name__}, {response}).")
|
|
554
|
-
return response
|
|
555
|
-
|
|
556
|
-
def n_fee_set_immediate_on_mode(self):
|
|
557
|
-
self._command_q.put((command_set_immediate_on_mode, [], {}))
|
|
558
|
-
LOGGER.debug(
|
|
559
|
-
"Controller.n_fee_set_immediate_on_mode: Put command_set_immediate_on_mode "
|
|
560
|
-
"on the Queue."
|
|
561
|
-
)
|
|
562
|
-
(cmd, response) = self._response_q.get()
|
|
563
|
-
LOGGER.debug(
|
|
564
|
-
f"Controller.n_fee_set_immediate_on_mode returned: ({cmd.__name__}, {response})"
|
|
565
|
-
)
|
|
566
|
-
return response
|
|
567
|
-
|
|
568
|
-
def n_fee_set_on_mode(self):
|
|
569
|
-
self._command_q.put((command_set_on_mode, [], {}))
|
|
570
|
-
LOGGER.debug("Controller.n_fee_set_on_mode: Put command_set_on_mode on the Queue.")
|
|
571
|
-
(cmd, response) = self._response_q.get()
|
|
572
|
-
LOGGER.debug(f"Controller.n_fee_set_on_mode returned: ({cmd.__name__}, {response})")
|
|
573
|
-
return response
|
|
574
|
-
|
|
575
|
-
def n_fee_set_standby_mode(self):
|
|
576
|
-
self._command_q.put((command_set_standby_mode, [], {}))
|
|
577
|
-
LOGGER.debug(
|
|
578
|
-
"Controller.n_fee_set_standby_mode: Put command_set_standby_mode on the Queue."
|
|
579
|
-
)
|
|
580
|
-
(cmd, response) = self._response_q.get()
|
|
581
|
-
LOGGER.debug(f"Controller.n_fee_set_standby_mode returned: ({cmd.__name__}, {response})")
|
|
582
|
-
return response
|
|
583
|
-
|
|
584
|
-
def n_fee_set_dump_mode(self, n_fee_parameters: dict):
|
|
585
|
-
v_start = n_fee_parameters.get("v_start", 0)
|
|
586
|
-
v_end = n_fee_parameters.get("v_end", 0)
|
|
587
|
-
sensor_sel_ = n_fee_parameters.get("sensor_sel", self.sensor_sel_both_sides)
|
|
588
|
-
ccd_readout_order = n_fee_parameters.get("ccd_readout_order", self.default_ccd_readout_order)
|
|
589
|
-
n_final_dump = n_fee_parameters.get("n_final_dump", 4510)
|
|
590
|
-
sync_sel = n_fee_parameters.get("sync_sel", 0)
|
|
591
|
-
num_cycles = n_fee_parameters.get("num_cycles", 0)
|
|
592
|
-
self._command_q.put((command_set_dump_mode,
|
|
593
|
-
[v_start, v_end, sensor_sel_, ccd_readout_order, n_final_dump, sync_sel],
|
|
594
|
-
{'num_cycles': num_cycles}))
|
|
595
|
-
LOGGER.debug(
|
|
596
|
-
"Controller.n_fee_set_dump_mode: Put command_set_dump_mode on the Queue."
|
|
597
|
-
)
|
|
598
|
-
(cmd, response) = self._response_q.get()
|
|
599
|
-
LOGGER.debug(f"Controller.n_fee_set_dump_mode returned: ({cmd.__name__}, {response})")
|
|
600
|
-
return response
|
|
601
|
-
|
|
602
|
-
def n_fee_set_dump_mode_int_sync(self, n_fee_parameters: dict):
|
|
603
|
-
v_start = n_fee_parameters.get("v_start", 0)
|
|
604
|
-
v_end = n_fee_parameters.get("v_end", 0)
|
|
605
|
-
sensor_sel_ = n_fee_parameters.get("sensor_sel", self.sensor_sel_both_sides)
|
|
606
|
-
n_final_dump = n_fee_parameters.get("n_final_dump", 4510)
|
|
607
|
-
ccd_readout_order = n_fee_parameters.get("ccd_readout_order", self.default_ccd_readout_order)
|
|
608
|
-
sync_sel = n_fee_parameters.get("sync_sel", 1)
|
|
609
|
-
int_sync_period = n_fee_parameters.get("int_sync_period", 600)
|
|
610
|
-
num_cycles = n_fee_parameters.get("num_cycles", 0)
|
|
611
|
-
self._command_q.put(
|
|
612
|
-
(
|
|
613
|
-
command_set_dump_mode_int_sync,
|
|
614
|
-
[v_start, v_end, sensor_sel_, ccd_readout_order, n_final_dump, int_sync_period, sync_sel],
|
|
615
|
-
{'num_cycles': num_cycles}
|
|
616
|
-
)
|
|
617
|
-
)
|
|
618
|
-
LOGGER.debug("Controller.n_fee_set_dump_mode_int_sync: "
|
|
619
|
-
"Put command_set_dump_mode_int_sync on the Queue.")
|
|
620
|
-
(cmd, response) = self._response_q.get()
|
|
621
|
-
LOGGER.debug(f"Controller.n_fee_set_dump_mode_int_sync returned: ({cmd.__name__}, {response})")
|
|
622
|
-
return response
|
|
623
|
-
|
|
624
|
-
def n_fee_set_full_image_mode(self, n_fee_parameters: dict):
|
|
625
|
-
v_start = n_fee_parameters.get("v_start", 0)
|
|
626
|
-
v_end = n_fee_parameters.get("v_end", 1)
|
|
627
|
-
sensor_sel_ = n_fee_parameters.get("sensor_sel", self.sensor_sel_both_sides)
|
|
628
|
-
ccd_readout_order = n_fee_parameters.get("ccd_readout_order", self.default_ccd_readout_order)
|
|
629
|
-
n_final_dump = n_fee_parameters.get("n_final_dump", 0)
|
|
630
|
-
num_cycles = n_fee_parameters.get("num_cycles", 0)
|
|
631
|
-
dump_mode_int = n_fee_parameters.get("dump_mode_int", False)
|
|
632
|
-
self._command_q.put((command_set_full_image_mode,
|
|
633
|
-
[v_start, v_end, sensor_sel_, ccd_readout_order, n_final_dump],
|
|
634
|
-
{'num_cycles': num_cycles, 'dump_mode_int': dump_mode_int}))
|
|
635
|
-
LOGGER.debug(
|
|
636
|
-
"Controller.n_fee_set_full_image_mode: Put command_set_full_image_mode on the Queue."
|
|
637
|
-
)
|
|
638
|
-
(cmd, response) = self._response_q.get()
|
|
639
|
-
LOGGER.debug(f"Controller.n_fee_set_full_image_mode returned: ({cmd.__name__}, {response})")
|
|
640
|
-
return response

    def n_fee_set_full_image_mode_int_sync(self, n_fee_parameters: dict):
        v_start = n_fee_parameters.get("v_start", 0)
        v_end = n_fee_parameters.get("v_end", 1)
        sensor_sel_ = n_fee_parameters.get("sensor_sel", self.sensor_sel_both_sides)
        ccd_readout_order = n_fee_parameters.get("ccd_readout_order", self.default_ccd_readout_order)
        n_final_dump = n_fee_parameters.get("n_final_dump", 0)
        int_sync_period = n_fee_parameters.get("int_sync_period", 6250)
        num_cycles = n_fee_parameters.get("num_cycles", 0)
        dump_mode_int = n_fee_parameters.get("dump_mode_int", True)
        self._command_q.put((command_set_full_image_mode_int_sync,
                             [v_start, v_end, sensor_sel_, ccd_readout_order, n_final_dump, int_sync_period],
                             {'num_cycles': num_cycles, 'dump_mode_int': dump_mode_int}))
        LOGGER.debug(
            "Controller.n_fee_set_full_image_mode_int_sync: Put command_set_full_image_mode_int_sync on the Queue."
        )
        (cmd, response) = self._response_q.get()
        LOGGER.debug(f"Controller.n_fee_set_full_image_mode_int_sync returned: ({cmd.__name__}, {response})")
        return response

    def n_fee_set_full_image_pattern_mode(self, n_fee_parameters: dict):
        v_start = n_fee_parameters.get("v_start", 0)
        v_end = n_fee_parameters.get("v_end", 1)
        sensor_sel_ = n_fee_parameters.get("sensor_sel", self.sensor_sel_both_sides)
        num_cycles = n_fee_parameters.get("num_cycles", 0)
        self._command_q.put((command_set_full_image_pattern_mode,
                             [v_start, v_end, sensor_sel_],
                             {'num_cycles': num_cycles}))
        LOGGER.debug(
            "Controller.n_fee_set_full_image_pattern_mode: Put command_set_full_image_pattern_mode "
            "on the Queue."
        )
        (cmd, response) = self._response_q.get()
        LOGGER.debug(
            f"Controller.n_fee_set_full_image_pattern_mode returned: ({cmd.__name__}, {response})"
        )
        return response

    def n_fee_high_precision_hk_mode(self, n_fee_parameters: dict):
        high_hk = n_fee_parameters.get("high_precision_hk", False)
        self._command_q.put((command_set_high_precision_hk_mode, [high_hk], {}))
        LOGGER.debug(
            "Controller.n_fee_set_high_precision_hk_mode: Put command_set_high_precision_hk_mode "
            "on the Queue."
        )
        (cmd, response) = self._response_q.get()
        LOGGER.debug(
            f"Controller.n_fee_set_high_precision_hk_mode returned: ({cmd.__name__}, {response})"
        )
        return response

    def n_fee_set_internal_sync(self, n_fee_parameters: dict):
        int_sync_period = n_fee_parameters.get("int_sync_period", 6250)
        self._command_q.put((command_internal_clock, [int_sync_period], {}))
        LOGGER.debug(
            "Controller.n_fee_set_internal_sync: Put command_internal_clock on the Queue."
        )
        (cmd, response) = self._response_q.get()
        LOGGER.debug(
            f"Controller.n_fee_set_internal_sync returned: ({cmd.__name__}, {response})"
        )
        return response

    def n_fee_set_external_sync(self, n_fee_parameters: dict):
        self._command_q.put((command_external_clock, [], {}))
        LOGGER.debug(
            "Controller.n_fee_set_external_sync: Put command_external_clock on the Queue."
        )
        (cmd, response) = self._response_q.get()
        LOGGER.debug(
            f"Controller.n_fee_set_external_sync returned: ({cmd.__name__}, {response})"
        )
        return response

    def n_fee_set_register_value(self, reg_name: str, field_name: str, field_value: int):
        self._command_q.put((command_set_register_value, [reg_name, field_name, field_value], {}))
        LOGGER.debug(
            "Controller.n_fee_set_register_value: Put command_set_register_value on the Queue."
        )
        (cmd, response) = self._response_q.get()
        LOGGER.debug(
            f"Controller.n_fee_set_register_value returned: ({cmd.__name__}, {response})"
        )
        return response

    def n_fee_reset(self):
        self._command_q.put((command_reset, [], {}))
        LOGGER.debug(
            "Controller.n_fee_reset: Put command_reset on the Queue."
        )
        (cmd, response) = self._response_q.get()
        LOGGER.debug(f"Controller.n_fee_reset returned: ({cmd.__name__}, {response})")
        return response

    def n_fee_set_clear_error_flags(self):

        self._command_q.put((command_set_clear_error_flags, [], {}))

        LOGGER.debug("Controller.n_fee_set_clear_error_flags: "
                     "Put command_set_clear_error_flags on the Queue.")

        (cmd, response) = self._response_q.get()

        LOGGER.debug(f"Controller.n_fee_set_clear_error_flags returned: ({cmd.__name__}, {response})")
        return response

    def n_fee_set_reverse_clocking(self, n_fee_parameters: dict):
        v_start = n_fee_parameters.get("v_start", 0)
        v_end = n_fee_parameters.get("v_end", 4509)
        sensor_sel_ = n_fee_parameters.get("sensor_sel", self.sensor_sel_both_sides)
        ccd_readout_order = n_fee_parameters.get("ccd_readout_order", self.default_ccd_readout_order)
        n_final_dump = n_fee_parameters.get("n_final_dump", 0)
        num_cycles = n_fee_parameters.get("num_cycles", 0)
        img_clk_dir = n_fee_parameters.get("img_clk_dir", 1)
        reg_clk_dir = n_fee_parameters.get("reg_clk_dir", 0)
        dump_mode_int = n_fee_parameters.get("dump_mode_int", False)

        self._command_q.put((command_set_reverse_clocking,
                             [v_start, v_end, sensor_sel_, ccd_readout_order, n_final_dump, img_clk_dir, reg_clk_dir],
                             {'num_cycles': num_cycles, 'dump_mode_int': dump_mode_int}))
        LOGGER.debug(
            "Controller.n_fee_set_reverse_clocking: Put command_set_reverse_clocking on the Queue."
        )
        (cmd, response) = self._response_q.get()
        LOGGER.debug(f"Controller.n_fee_set_reverse_clocking returned: ({cmd.__name__}, {response})")
        return response

    def n_fee_set_charge_injection(self, n_fee_parameters: dict):
        num_cycles = n_fee_parameters.get("num_cycles", 0)
        v_start = n_fee_parameters.get("v_start", 0)
        v_end = n_fee_parameters.get("v_end", 4509)
        n_final_dump = n_fee_parameters.get("n_final_dump", 0)
        sensor_sel_ = n_fee_parameters.get("sensor_sel", self.sensor_sel_both_sides)
        ccd_readout_order = n_fee_parameters.get("ccd_readout_order", self.default_ccd_readout_order)
        charge_injection_width = n_fee_parameters.get("charge_injection_width", 0)
        charge_injection_gap = n_fee_parameters.get("charge_injection_gap", 0)

        self._command_q.put(
            (
                command_set_charge_injection,
                [
                    v_start, v_end, n_final_dump, sensor_sel_, ccd_readout_order,
                    charge_injection_width, charge_injection_gap
                ],
                {'num_cycles': num_cycles}
            ))
        LOGGER.debug(
            "Controller.n_fee_set_charge_injection: Put command_set_charge_injection on the Queue."
        )
        (cmd, response) = self._response_q.get()
        LOGGER.debug(f"Controller.n_fee_set_charge_injection returned: ({cmd.__name__}, {response})")
        return response

    def n_fee_set_vgd(self, n_fee_parameters: dict):

        # The default value for ccd_vgd_config is 0xCFE = hex(int(19.90/5.983*1000))
        # This value is taken from: PLATO-MSSL-PL-FI-0001_9.0_N-FEE_Register_Map Draft A

        ccd_vgd_config = n_fee_parameters.get("ccd_vgd_config", 19.90)

        self._command_q.put((command_set_vgd, [ccd_vgd_config], {}))

        LOGGER.debug("Controller.n_fee_set_vgd: Put command_set_vgd on the Queue.")

        (cmd, response) = self._response_q.get()

        LOGGER.debug(f"Controller.n_fee_set_vgd returned: ({cmd.__name__}, {response})")
        return response
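
The comment in `n_fee_set_vgd()` states that the default of 19.90 V corresponds to the register value 0xCFE via `int(19.90/5.983*1000)`. A two-line check of that arithmetic (sketch only, no project code involved):

```python
# Verify the conversion quoted in the comment above: 19.90 V -> 0xCFE.
raw = int(19.90 / 5.983 * 1000)
assert raw == 3326 == 0xCFE
print(hex(raw))   # -> 0xcfe
```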

    def n_fee_set_fpga_defaults(self):
        self._command_q.put((command_set_nfee_fpga_defaults, [], {}))

        LOGGER.debug("Controller.n_fee_set_fpga_defaults: Put n_fee_set_fpga_defaults on the Queue.")

        (cmd, response) = self._response_q.get()

        LOGGER.debug(f"Controller.n_fee_set_fpga_defaults returned: ({cmd.__name__}, {response})")
        return response


class DPUProxy(Proxy, DPUInterface):
    """
    The DPUProxy class is used to connect to the DPU Control Server and send commands to the FEE.
    """

    def __init__(
        self,
        protocol=CTRL_SETTINGS.PROTOCOL,
        hostname=CTRL_SETTINGS.HOSTNAME,
        port=CTRL_SETTINGS.COMMANDING_PORT,
    ):
        """
        Args:
            protocol: the transport protocol [default is taken from settings file]
            hostname: location of the control server (IP address)
                [default is taken from settings file]
            port: TCP port on which the control server is listening for commands
                [default is taken from settings file]
        """
        super().__init__(connect_address(protocol, hostname, port), timeout=10_000)
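
As `wait_num_cycles()` further down illustrates, a `DPUProxy` can be used as a context manager. A minimal usage sketch, assuming a DPU Control Server is running with the default settings (the import path below is an assumption):

```python
# Sketch: connect to the DPU Control Server and issue a couple of commands.
# Assumes a running control server; the module path below is an assumption.
from egse.dpu import DPUProxy

with DPUProxy() as dpu:
    sync_mode = dpu.n_fee_get_sync_mode()   # 0 = external sync, 1 = internal sync
    print(f"{sync_mode=}")
    dpu.n_fee_reset()
```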


class DPUCommand(ClientServerCommand):
    pass


class DPUProtocol(CommandProtocol):
    def __init__(self, control_server: ControlServer, transport: SpaceWireInterface):

        super().__init__()

        self.control_server = control_server

        # Set up the queues used to communicate with the DPU Processor Process.
        # The command queue is joinable because the Controller needs to wait for a response in
        # the response queue.

        self.command_queue = multiprocessing.Queue()
        self.response_queue = multiprocessing.Queue()
        self.priority_queue = multiprocessing.Queue()

        # Start a separate Process to handle FEE communication

        self.processor = DPUProcessor(
            transport, self.priority_queue, self.command_queue, self.response_queue)
        self.processor.name = "dpu.processor"
        self.processor.start()

        self.controller = DPUController(
            self.priority_queue, self.command_queue, self.response_queue)

        self.load_commands(COMMAND_SETTINGS.Commands, DPUCommand, DPUController)

        self.build_device_method_lookup_table(self.controller)

    def get_bind_address(self):
        return bind_address(
            self.control_server.get_communication_protocol(),
            self.control_server.get_commanding_port(),
        )

    def get_status(self) -> dict:
        status = super().get_status()
        status["DPU Processor"] = "alive" if self.processor.is_alive() else "--"
        return status

    def get_housekeeping(self) -> dict:
        return {
            "timestamp": format_datetime(),
        }

    def quit(self):
        self.processor.quit()

        def not_alive():
            return not self.processor.is_alive()

        if wait_until(not_alive, timeout=6.5) is False:
            self.processor.join()
            return

        LOGGER.warning("Terminating DPU Processor")
        self.processor.terminate()

        # Wait at least 6.25s which is the 'normal' readout cycle time

        if wait_until(not_alive, timeout=6.5) is False:
            self.processor.join()
            return

        LOGGER.warning("Killing DPU Processor")
        self.processor.kill()
        self.processor.join()

    def is_alive(self) -> bool:
        is_alive = self.processor.is_alive()

        if not is_alive:
            LOGGER.warning(
                f"Process '{self.processor.name}' died for some reason, check for "
                f"an exception in the logging output."
            )

        return is_alive
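
The protocol above wires the controller and the DPU Processor together with plain multiprocessing queues: the controller puts a `(command, args, kwargs)` tuple on the command queue and blocks on the response queue until the processor has executed it. A stripped-down, self-contained sketch of that round trip; `command_noop` and the worker are stand-ins, not the real DPU commands:

```python
# Stand-alone sketch of the (command, args, kwargs) round trip used above.
import multiprocessing


def command_noop(*args, **kwargs):
    return "OK"


def worker(command_q, response_q):
    cmd, args, kwargs = command_q.get()          # what the DPU Processor does ...
    response_q.put((cmd, cmd(*args, **kwargs)))  # ... execute and answer


if __name__ == "__main__":
    command_q = multiprocessing.Queue()
    response_q = multiprocessing.Queue()
    proc = multiprocessing.Process(target=worker, args=(command_q, response_q))
    proc.start()

    command_q.put((command_noop, [], {}))        # what the Controller does
    cmd, response = response_q.get()             # ... and how it waits for the answer
    print(cmd.__name__, response)                # -> command_noop OK

    proc.join()
```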


DPU_PROCESSOR_SETTINGS = Settings.load("DPU Processor")


class DPUMonitoring:
    """
    The DPUMonitoring class allows you to execute a function synchronised to the reception of a
    timecode or a housekeeping packet from the N-FEE.

    Args:
        timeout: time to wait for a message before a timeout [default=30s]
    """
    def __init__(self, timeout: float = 30):
        self._context = zmq.Context.instance()
        self._endpoint = connect_address('tcp', DPU_PROCESSOR_SETTINGS.HOSTNAME, DPU_PROCESSOR_SETTINGS.MONITORING_PORT)
        self._multipart = True
        self._timeout = timeout  # seconds
        self._retries = 3
        self._socket = None
        self._subscriptions = set()

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self._socket.closed:
            self.disconnect()

    def connect(self):
        self._socket = self._context.socket(zmq.SUB)
        self._socket.connect(self._endpoint)

        # subscribe_string = b''
        # self._socket.subscribe(subscribe_string)
        # self._subscriptions.add(subscribe_string)

    def disconnect(self):
        self._socket.close(linger=0)
        self._subscriptions.clear()

    def unsubscribe_all(self):
        for sub in self._subscriptions:
            self._socket.unsubscribe(sub)
        self._subscriptions.clear()

    def unsubscribe(self, sync_id: int):
        subscribe_string = sync_id.to_bytes(1, byteorder='big') if sync_id else b''
        try:
            self._subscriptions.remove(subscribe_string)
            self._socket.unsubscribe(subscribe_string)
        except KeyError:
            LOGGER.warning(
                f"Trying to unsubscribe a key that was not previously subscribed: {subscribe_string}"
            )

    def subscribe(self, sync_id: int = None):
        subscribe_string = sync_id.to_bytes(1, byteorder='big') if sync_id else b''

        if subscribe_string in self._subscriptions:
            return

        self._socket.subscribe(subscribe_string)
        self._subscriptions.add(subscribe_string)

    def wait_for_timecode(self) -> Tuple[int, str]:
        """
        Connects to the monitoring socket of the DPU Processor and returns when a TIMECODE
        synchronisation message is received.

        Returns:
            A tuple of the timecode (int) and the corresponding timestamp (str).
        """
        self.unsubscribe_all()
        self.subscribe(MessageIdentifier.SYNC_TIMECODE)

        retries = self._retries

        while True:
            rlist, _, _ = zmq.select([self._socket], [], [], timeout=self._timeout)
            if self._socket in rlist:
                if self._multipart:
                    sync_id, pickle_string = self._socket.recv_multipart()
                    sync_id = int.from_bytes(sync_id, byteorder='big')
                else:
                    sync_id = MessageIdentifier.ALL
                    pickle_string = self._socket.recv()
                timecode, timestamp = pickle.loads(pickle_string)

                LOGGER.debug(f"{MessageIdentifier(sync_id).name}, {timecode}, {timestamp}")

                return timecode, timestamp
            else:
                retries -= 1
                LOGGER.warning(f"Monitoring timeout, {retries} retries to go")
                if retries <= 0:
                    raise TimeoutError(f"DPUMonitoring timed out after {self._retries * self._timeout} seconds.")
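
Together with the context-manager support above, `wait_for_timecode()` gives the simplest way to synchronise a script to the N-FEE readout. A short sketch, assuming the DPU Processor is up and publishing on its monitoring port:

```python
# Sketch: block until the next timecode is published by the DPU Processor.
with DPUMonitoring(timeout=30) as moni:
    timecode, timestamp = moni.wait_for_timecode()
    print(f"timecode={timecode} received at {timestamp}")
```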

    def wait_for_hdf5_filename(self, retries: int = None, timeout: float = None) -> List[Path]:
        """
        Connects to the monitoring socket of the DPU Processor and returns a list of path names that
        were part of the current registration in the Storage, right before a new registration was
        initiated.

        This method is mainly intended to be used by processes that need to work with the generated
        HDF5 files after they have been closed by the DPU Processor. One of these processes is the
        FITS generation.

        Notes:
            The path names that are returned are absolute filenames that are specific for the
            egse-server on which the DPU Processor is running. These files might not be accessible
            from the machine you are running this monitoring request.

        Returns:
            A list of path names.
        Raises:
            A TimeoutError when no sync data was received from the monitoring socket after 30s.
        """
        self.unsubscribe_all()
        self.subscribe(MessageIdentifier.HDF5_FILENAMES)

        retries = retries if retries is not None else self._retries
        timeout = timeout or self._timeout

        while True:
            rlist, _, _ = zmq.select([self._socket], [], [], timeout=timeout)
            if self._socket in rlist:
                if self._multipart:
                    sync_id, pickle_string = self._socket.recv_multipart()
                    sync_id = int.from_bytes(sync_id, byteorder='big')
                else:
                    sync_id = MessageIdentifier.ALL
                    pickle_string = self._socket.recv()
                filenames = pickle.loads(pickle_string)

                LOGGER.debug(f"{MessageIdentifier(sync_id).name}, {filenames}")

                return filenames
            else:
                retries -= 1
                # LOGGER.warning(f"Monitoring timeout, {retries} retries to go")
                if retries <= 0:
                    raise TimeoutError(f"DPUMonitoring timed out after {self._retries * self._timeout} seconds.")

    def wait_number_of_pulses(self, num_pulses: int) -> None:
        """
        Wait for a number of pulses (long and short), then return.

        When the number of pulses has been received, the function returns right after the timecode synchronisation
        message. Any command that is sent to the N-FEE immediately after this function returns will be processed within
        that same readout frame, i.e. before the next sync pulse.

        Args:
            num_pulses: the number of sync pulses to wait before returning.

        Raises:
            A TimeoutError when no sync data was received from the monitoring socket after 30s.
        """
        self.unsubscribe_all()
        self.subscribe(MessageIdentifier.SYNC_TIMECODE)

        retries = self._retries

        LOGGER.debug(f"Waiting for {num_pulses} pulses...")

        while True:
            rlist, _, _ = zmq.select([self._socket], [], [], timeout=self._timeout)
            if self._socket in rlist:
                if self._multipart:
                    sync_id, pickle_string = self._socket.recv_multipart()
                    sync_id = int.from_bytes(sync_id, byteorder='big')
                else:
                    sync_id = MessageIdentifier.ALL
                    pickle_string = self._socket.recv()
                timecode, timestamp = pickle.loads(pickle_string)

                num_pulses -= 1
                LOGGER.debug(f"{MessageIdentifier(sync_id).name}, {timecode=}, {timestamp=}, {num_pulses=}")
                if num_pulses <= 0:
                    return

                retries = self._retries  # reset the number of retries
            else:
                retries -= 1
                LOGGER.warning(f"Monitoring timeout, {retries} retries to go")
                if retries <= 0:
                    raise TimeoutError(f"DPUMonitoring timed out after {self._retries * self._timeout} seconds.")

    def wait_num_cycles(self, num_cycles: int, return_on_frame: int = 3) -> int:
        """
        Wait for a number of long pulses (cycles), then return.

        This method will wait for full cycles, i.e. 4 readouts in external sync mode, 1 readout in internal sync mode,
        and will return immediately after receiving the HK sync pulse for the last frame in the requested cycle, i.e.
        frame number == 3 for external sync and frame number == 0 for internal sync. If an RMAP command is then sent
        when the function returns, it will still be executed within that frame and the changed register settings will
        become active on the next pulse, which is a long pulse, the start of the next cycle.
        That way we do not lose a cycle.

        Args:
            num_cycles: the number of full cycles to wait before returning
            return_on_frame: choose the readout frame on which to return [default = 3]

        Returns:
            Zero (0) when no cycles were waited because num_cycles <= 0, otherwise return value > 0.

        Raises:
            A TimeoutError when no sync data was received from the monitoring socket after 30s.
        """
        self.unsubscribe_all()
        self.subscribe(MessageIdentifier.SYNC_HK_PACKET)

        retries = self._retries
        count = 0

        if num_cycles <= 0:
            LOGGER.debug(f"{num_cycles=}, no cycles waited, returned immediately")
            return count

        LOGGER.debug(f"Waiting for {num_cycles} cycles...")

        with Timer("Loop cycles") as timer, DPUProxy() as dpu_proxy:

            # When we are in external sync mode, we need to skip the current cycle,
            # because the requested changes in the register -> FPGA will only occur on
            # the next long pulse. No need for this when in internal sync mode.

            sync_mode = dpu_proxy.n_fee_get_sync_mode()
            if sync_mode == 0:
                num_cycles += 1

            while True:
                count += 1
                rlist, _, _ = zmq.select([self._socket], [], [], timeout=self._timeout)
                if self._socket in rlist:
                    if self._multipart:
                        sync_id, pickle_string = self._socket.recv_multipart()
                        sync_id = int.from_bytes(sync_id, byteorder='big')
                    else:
                        sync_id = MessageIdentifier.ALL
                        pickle_string = self._socket.recv()
                    status = pickle.loads(pickle_string)

                    LOGGER.debug(f"{MessageIdentifier(sync_id).name}, {to_string(status[0])}")

                    sync_mode = dpu_proxy.n_fee_get_sync_mode()

                    if (sync_mode == 1) or (packet := status[0]) and packet.frame_number == return_on_frame:
                        num_cycles -= 1

                        LOGGER.debug(f"NUM_CYCLES={num_cycles}, {sync_mode=}")

                        if num_cycles <= 0:
                            return count

                    retries = self._retries  # reset the number of retries
                else:
                    retries -= 1
                    LOGGER.warning(f"Monitoring timeout, {retries} retries to go")
                    if retries <= 0:
                        raise TimeoutError(f"DPUMonitoring timed out after {self._retries * self._timeout} seconds.")

            timer.log_elapsed()

    def monitor_all(self):
        self.subscribe()

        retries = self._retries

        while True:
            rlist, _, _ = zmq.select([self._socket], [], [], timeout=self._timeout)
            if self._socket in rlist:
                if self._multipart:
                    sync_id, pickle_string = self._socket.recv_multipart()
                    sync_id = int.from_bytes(sync_id, byteorder='big')
                else:
                    sync_id = MessageIdentifier.ALL
                    pickle_string = self._socket.recv()

                msg = pickle.loads(pickle_string)
                if sync_id == MessageIdentifier.SYNC_TIMECODE:
                    msg = f"timestamp={msg[1]}, timecode={msg[0]}"
                    LOGGER.info(f"{MessageIdentifier(sync_id).name}, {msg}")
                elif sync_id == MessageIdentifier.SYNC_HK_PACKET:
                    msg = f"timestamp={msg[1]}, packet type={to_string(msg[0])}"
                    LOGGER.info(f"{MessageIdentifier(sync_id).name}, {msg}")
                elif sync_id == MessageIdentifier.NUM_CYCLES:
                    LOGGER.info(f"{MessageIdentifier(sync_id).name}, num_cycles={msg}")

                retries = self._retries  # reset the number of retries
            else:
                retries -= 1
                LOGGER.warning(f"Monitoring timeout, {retries} retries to go")
                if retries <= 0:
                    raise TimeoutError(f"DPUMonitoring timed out after {self._retries * self._timeout} seconds.")

    def wait_until_synced_num_cycles_is_zero(self):
        """
        Wait until the synced num_cycles turns zero, then return. The synced num_cycles is
        the num_cycles that is maintained by the DPU Processor and which is distributed by the
        DPU Processor on every 400ms pulse.

        Raises:
            A TimeoutError when no sync data was received from the monitoring socket after 30s.
        """
        self.unsubscribe_all()
        self.subscribe(MessageIdentifier.NUM_CYCLES)

        retries = self._retries

        while True:
            rlist, _, _ = zmq.select([self._socket], [], [], timeout=self._timeout)
            if self._socket in rlist:
                if self._multipart:
                    sync_id, pickle_string = self._socket.recv_multipart()
                    sync_id = int.from_bytes(sync_id, byteorder='big')
                else:
                    sync_id = MessageIdentifier.ALL
                    pickle_string = self._socket.recv()

                synced_num_cycles = pickle.loads(pickle_string)

                LOGGER.info(f"{MessageIdentifier(sync_id).name} = {synced_num_cycles}")

                if synced_num_cycles <= 0:
                    return

                retries = self._retries  # reset the number of retries
            else:
                retries -= 1
                LOGGER.warning(f"Monitoring timeout, {retries} retries to go")
                if retries <= 0:
                    raise TimeoutError(f"DPUMonitoring timed out after {self._retries * self._timeout} seconds.")

    def do(self, func: Callable, *args, **kwargs):
        return func(*args, **kwargs)

    def on_long_pulse_do(self, func: Callable, *args, **kwargs):
        """
        Connects to the monitoring socket of the DPU Processor and executes the given function
        when the frame_number == 0, i.e. right after a long pulse.

        Args:
            func (Callable): the function to synchronise
            *args: any arguments to pass to the function
            **kwargs: any keyword arguments to pass to the function

        Returns:
            The return value of the called function.

        Raises:
            A TimeoutError when no sync data was received from the monitoring socket after 30s.
        """
        return self.on_frame_number_do(0, func, *args, **kwargs)

    def on_frame_number_do(self, frame_number: int, func: Callable, *args, **kwargs):
        """
        Connects to the monitoring socket of the DPU Processor and executes the given function
        when the given frame_number is reached. This allows sending N-FEE commands right before the long pulse.

        Args:
            frame_number: the frame number on which to execute the function
            func (Callable): the function to synchronise
            *args: any arguments to pass to the function
            **kwargs: any keyword arguments to pass to the function

        Returns:
            The return value of the called function.

        Raises:
            A TimeoutError when no sync data was received from the monitoring socket after 30s.
        """
        self.unsubscribe_all()
        self.subscribe(MessageIdentifier.SYNC_HK_PACKET)

        retries = self._retries

        while True:
            rlist, _, _ = zmq.select([self._socket], [], [], timeout=self._timeout)
            if self._socket in rlist:
                if self._multipart:
                    sync_id, pickle_string = self._socket.recv_multipart()
                    sync_id = int.from_bytes(sync_id, byteorder='big')
                else:
                    sync_id = MessageIdentifier.ALL
                    pickle_string = self._socket.recv()
                status = pickle.loads(pickle_string)

                LOGGER.debug(f"{MessageIdentifier(sync_id).name}, {status[0]}")

                packet: DataPacketType = status[0]
                if packet and packet.frame_number == frame_number:
                    return func(*args, **kwargs)
            else:
                retries -= 1
                LOGGER.warning(f"Monitoring timeout, {retries} retries to go")
                if retries <= 0:
                    raise TimeoutError(f"DPUMonitoring timed out after {self._retries * self._timeout} seconds.")
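
`on_frame_number_do()` and `on_long_pulse_do()` are the building blocks for sending a command at a well-defined point in the readout cycle. A hedged sketch that arms a full-image-mode command right after the long pulse; the parameter values are illustrative only:

```python
# Sketch: execute a DPU command synchronised to the long (400 ms) pulse.
# DPUProxy and DPUMonitoring are the classes defined above; values are examples.
n_fee_parameters = {"num_cycles": 2, "v_start": 0, "v_end": 4509}

with DPUProxy() as dpu, DPUMonitoring() as moni:
    # on_long_pulse_do() returns whatever the wrapped call returns
    response = moni.on_long_pulse_do(dpu.n_fee_set_full_image_mode, n_fee_parameters)
    print(response)
```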


@dataclass
class DPUInternals:

    # The number of readout cycles requested by the user. A cycle is the period between two long
    # pulses (400ms). When num_cycles == 0, the N-FEE will be instructed to go to dump mode, when
    # num_cycles < 0 nothing will be done.
    num_cycles: int

    # The expected last packet flags tell you if for a certain ccd side and packet a last packet
    # flag is expected. This is similar to saying if such a packet is to be expected from the N-FEE.
    expected_last_packet_flags: List[int]

    # DUMP mode is not a real N-FEE mode, but is defined in the DPU Processor to make sure the CCDs
    # will not saturate when we are not reading out image data. The conditions for a dump mode are
    # the register map flags 'digitise_en' being False and 'DG_high' being True.
    dump_mode: bool = False

    # The internal sync flag is set to True whenever the register map parameter 'sync_sel' is True.
    internal_sync: bool = False

    # This flag is set to True when the N-FEE shall be put into dump mode internal sync after
    # num_cycles becomes zero.
    dump_mode_int: bool = False

    # The current frame number. This value needs to be updated as soon as the housekeeping packet
    # is received.
    frame_number: int = -1

    # Enumeration with the information about E and F, based on the setup (camera-dependent)
    ccd_sides_enum: Enum = None

    # Enumeration with the sensor_sel
    sensor_sel_enum: Enum = None

    # Mapping of the CCD identifier to the binary representation (loaded from the Setup)
    ccd_id_to_bin: List[int] = None

    # The clear_error_flags shall be executed on every readout, i.e. every 200ms and 400ms pulse.
    clear_error_flags = False

    # The number of cycles that will be used for slicing the FITS files. This parameter is
    # saved in the HDF5 file upon reception.
    slicing_num_cycles = 0

    # When in internal sync the ccd_readout_order is not used like in external sync mode.
    # Each readout is done on the same CCD, i.e. the first CCD number in the ccd_readout_order list.
    # Therefore, we will rotate this list on each readout in internal sync dump mode to guarantee
    # each CCD is cleared out.

    # Initialise cycling of CCDs in internal sync dump mode to the default CCD numbering.
    current_ccd_readout_order = CCD_NUMBERS

    # The cycle_count goes from [0 -> 3] to make sure that, during internal sync dump mode, we have cleared out
    # all four CCDs. A clear-out cycle can only be interrupted when cycle_count == 0, at that time all commands
    # on the queue will be executed.
    cycle_count = 0

    def reset_int_sync_dump_mode(self, ccd_numbers: list = None):
        """
        Resets the cycle_count to zero (0) and the current ccd_readout_order to the given ccd_numbers.
        When ccd_numbers is None the default CCD readout order will be used, i.e. CCD_NUMBERS, [1,2,3,4].

        Args:
            ccd_numbers: a list of four CCD numbers going from 1 to 4.

        """
        self.current_ccd_readout_order = ccd_numbers or CCD_NUMBERS
        self.cycle_count = 0

    def int_sync_cycle_dump_mode(self):
        """Returns True if we are in internal sync dump mode."""
        return self.internal_sync and self.dump_mode and self.num_cycles < 0

    def is_start_of_cycle(self):
        """
        Returns True if in the first readout in this cycle, i.e. frame number is 0.
        """

        return self.frame_number == 0

    def is_end_of_cycle(self):
        """
        Returns True if in the last readout in this cycle.
        Note that, when in internal sync mode, this method always returns True.
        """
        return True if self.internal_sync else self.frame_number == 3

    def is_400ms_pulse(self):
        return self.frame_number == 0

    def is_200ms_pulse(self):
        return self.frame_number in [1, 2, 3]

    def update(self, n_fee_state: NFEEState.StateTuple):
        self.dump_mode = n_fee_state.ccd_mode_config == n_fee_mode.FULL_IMAGE_MODE and not bool(n_fee_state.digitise_en)
        self.internal_sync = bool(n_fee_state.sync_sel)
        self.expected_last_packet_flags = create_expected_last_packet_flags(n_fee_state, self.sensor_sel_enum)
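
In external sync mode a cycle consists of four readouts (frame numbers 0 to 3): frame 0 follows the long 400 ms pulse and frames 1 to 3 follow the short 200 ms pulses, which is exactly what `is_400ms_pulse()`, `is_200ms_pulse()` and `is_end_of_cycle()` encode. A tiny self-contained sketch of that mapping, using a stripped-down stand-in rather than the full dataclass:

```python
# Stand-in sketch of the frame-number logic encoded in DPUInternals above.
from dataclasses import dataclass


@dataclass
class FrameState:
    frame_number: int
    internal_sync: bool = False   # external sync mode

    def is_400ms_pulse(self) -> bool:
        return self.frame_number == 0

    def is_end_of_cycle(self) -> bool:
        return True if self.internal_sync else self.frame_number == 3


for frame in range(4):
    state = FrameState(frame_number=frame)
    print(frame, state.is_400ms_pulse(), state.is_end_of_cycle())
# frame 0 is the long-pulse readout, frame 3 closes the cycle
```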


class DPUProcessor(multiprocessing.Process):
    """
    The DPU Processor handles all interactions with the FEE. It reads the packets from the FEE
    within the readout time frame, and sends commands to the FEE through the RMAP protocol.

    The commands are read from a commanding queue which is shared between the DPU Processor and
    the DPU Controller. Any response from the FEE is put on the response queue which is also
    shared between the processor and the controller.

    The transport mechanism that is used to read and write SpaceWire packets is abstracted into a
    SpaceWireInterface. That allows us to interact with the FEE through different hardware
    channels, e.g. a SpaceWire interface (DSI) or a ZeroMQ DEALER-DEALER protocol for
    communication with the FEE Simulator.
    """

    def __init__(
        self,
        transport: SpaceWireInterface,
        priority_queue: multiprocessing.Queue,
        command_queue: multiprocessing.Queue,
        response_queue: multiprocessing.Queue,
    ):

        super().__init__()

        self._transport = transport
        self._priority_q = priority_queue
        self._command_q = command_queue
        self._response_q = response_queue
        self.register_map = RegisterMap("N-FEE")
        self._quit_event = multiprocessing.Event()

        # These will be properly initialized when the register map is read from the N-FEE

        self._n_fee_state = NFEEState()

    def run(self):

        setup = GlobalState.setup

        self._dpu_internals = DPUInternals(
            num_cycles=-1,
            expected_last_packet_flags=[False, False, False, False],
            dump_mode=False,
            internal_sync=False,
            frame_number=-1,
            ccd_sides_enum=setup.camera.fee.ccd_sides.enum,
            sensor_sel_enum=setup.camera.fee.sensor_sel.enum,
            ccd_id_to_bin=setup.camera.fee.ccd_numbering.CCD_ID_TO_BIN,
        )

        # The DPU Processor runs in a different process and since ZeroMQ Sockets are not
        # thread/process safe, we have to recreate the ZeroMQHandler attached to the egse.logger
        # in this process.
        import egse.logger
        egse.logger.replace_zmq_handler()

        LOGGER.info("DPU Processor started.")

        self._killer = SignalCatcher()

        # Setup a SpaceWire connection with the FEE (Simulator) and
        # open a Storage proxy to save all the data packets.

        origin_spw_data = N_FEE_SETTINGS.ORIGIN_SPW_DATA
        origin_spw_data_type = DATA_TYPE[N_FEE_SETTINGS.ORIGIN_SPW_DATA_TYPE]

        ctx: zmq.Context = zmq.Context().instance()

        # Setup monitoring socket

        mon_sock: zmq.Socket = ctx.socket(zmq.PUB)
        endpoint = bind_address("tcp", DPU_PROCESSOR_SETTINGS.MONITORING_PORT)
        mon_sock.bind(endpoint)
        LOGGER.info(f"DPU Processor sending monitoring sync signals to {endpoint}.")

        # Setup data distribution socket

        dist_sock: zmq.Socket = ctx.socket(zmq.PUB)
        endpoint = bind_address("tcp", DPU_PROCESSOR_SETTINGS.DATA_DISTRIBUTION_PORT)
        dist_sock.setsockopt(zmq.SNDHWM, 0)  # never block on sending msg
        dist_sock.bind(endpoint)

        LOGGER.info(f"DPU Processor sending SpW data to {endpoint}.")

        with self._transport, StorageProxy() as storage, ConfigurationManagerProxy() as cm:
            LOGGER.info("SpaceWire Transport has been connected.")
            self._transport.configure()
            LOGGER.info("SpaceWire Transport has been configured.")

            LOGGER.info(f"Register {origin_spw_data} to Storage")
            register_to_storage_manager(storage, origin_spw_data)

            # Before going into the while-loop, read the full register from the N-FEE and initialise
            # the register map.

            try:
                self.initialise_register_map()
                save_register_map(self.register_map, storage, origin_spw_data, dist_sock)
                save_format_version(storage, origin_spw_data)
                save_obsid(storage, origin_spw_data, cm.get_obsid().return_code)
            except Abort:
                LOGGER.warning("The DPU Processor is aborting....")
                unregister_from_storage_manager(storage, origin_spw_data)
                LOGGER.info(f"The DPU Processor unregistered {origin_spw_data} from the Storage.")
                return
            except Exception as exc:
                LOGGER.error(exc, exc_info=exc)

            # Initialise the N-FEE state from the register map

            self._n_fee_state.update_at_400ms(self.register_map)

            # Initialise the DPU internals from the N-FEE State

            self._dpu_internals.update(self._n_fee_state.get_state())

            LOGGER.debug(f"{self._dpu_internals.dump_mode=}")
            LOGGER.debug(f"{self._dpu_internals.internal_sync=}")
            LOGGER.debug(f"{self._dpu_internals.expected_last_packet_flags=}")

            # Initialise the data attributes, they will be added as attributes to the data group
            # in the HDF5 file.

            data_attr = self._n_fee_state.get_state()._asdict()

            # Initialise the start_time. This is needed, because when a NoTimeCodeError occurs
            # the variable will not be initialised resulting in a critical error.

            start_time = time.perf_counter()

            try:
                LOGGER.info("Going into the while True loop...")
                while True:

                    try:
                        # First two packets are a Timecode and a HK packet ------------------------

                        tc_packet, timestamp, start_time = read_timecode(self._transport)

                        hk_packet, timestamp = read_hk_packet(self._transport)

                        self._dpu_internals.frame_number = hk_packet.type.frame_number
                        self._dpu_internals.clear_error_flags = True

                        # Create a new HDF5 file for each readout cycle ----------------------------

                        if self._dpu_internals.is_start_of_cycle():
                            with Timer("Creating a new data file"):
                                new_spw_data_file(storage, self.register_map, origin_spw_data,
                                                  origin_spw_data_type, mon_sock, dist_sock)
                                save_obsid(storage, origin_spw_data, cm.get_obsid().return_code)
                                save_num_cycles(storage, origin_spw_data, self._dpu_internals.num_cycles)

                        # Update the N-FEE state (FPGA) --------------------------------------------

                        if self._dpu_internals.is_400ms_pulse():
                            self._n_fee_state.update_at_400ms(self.register_map)
                        elif self._dpu_internals.is_200ms_pulse():
                            self._n_fee_state.update_at_200ms(self.register_map)
                        else:
                            pass  # we are entering the loop for the first time

                        # Update the DPU internals from the N-FEE state

                        self._dpu_internals.update(self._n_fee_state.get_state())

                        # Process and save the timecode and HK packet ------------------------------

                        process_timecode(tc_packet, timestamp, storage, origin_spw_data,
                                         self._dpu_internals.frame_number, mon_sock, dist_sock)

                        process_hk_packet(hk_packet, timestamp, storage, origin_spw_data,
                                          self._dpu_internals.frame_number, mon_sock, dist_sock)

                        process_high_priority_commands(self._priority_q, self._response_q,
                                                       self._n_fee_state.get_state(),
                                                       self._dpu_internals, self.register_map)

                        # On any new readout cycle (400ms pulse), update the state and the internals

                        # FIXME: Why is this test done here and not at the end of the while loop
                        #        when all data has been read?

                        if self._dpu_internals.is_400ms_pulse():

                            pickle_string = pickle.dumps(self._dpu_internals.num_cycles)
                            msg_id = MessageIdentifier.NUM_CYCLES.to_bytes(1, 'big')
                            num_cycles_msg = [msg_id, pickle_string]
                            dist_sock.send_multipart(num_cycles_msg)
                            mon_sock.send_multipart(num_cycles_msg)

                            # decrement num_cycles, this can go negative which is interpreted as
                            # not doing anything...

                            self._dpu_internals.num_cycles -= 1  # check issue #917 before changing this line

                        LOGGER.debug(
                            f"HK: frame number={hk_packet.type.frame_number}, dump mode={self._dpu_internals.dump_mode}, num_cycles={self._dpu_internals.num_cycles}"
                        )

                        LOGGER.debug(
                            f"FEE mode in register map: {n_fee_mode(self.register_map['ccd_mode_config']).name}"
                        )

                        save_slicing_parameter(storage, origin_spw_data, self._dpu_internals.slicing_num_cycles)

                        if self._dpu_internals.is_end_of_cycle():

                            # When we are at the end of our requested num_cycles, go to DUMP mode

                            # FIXME: review if this is the right place and if the dump command will
                            #        be executed at the right moment, e.g. are there no commands on
                            #        the queue anymore?

                            if self._dpu_internals.num_cycles == 0:
                                if self._dpu_internals.dump_mode_int:
                                    self._dpu_internals.reset_int_sync_dump_mode()
                                    dump_mode_command = command_set_dump_mode_int_sync
                                else:
                                    dump_mode_command = command_set_dump_mode
                                self._command_q.put((dump_mode_command, [], {'response': False}))

                        # Then we might get data packets depending on the FEE mode -----------------

                        mode = hk_packet.type.mode
                        LOGGER.debug(f"FEE mode in HK packet: {n_fee_mode(mode).name}")

                        data_attr = update_data_attributes(data_attr, self._n_fee_state.get_state())

                        with Timer("Read and process data packets"):
                            read_and_process_data_packets(
                                self._transport, storage, origin_spw_data, start_time, mode,
                                self.register_map, data_attr, self._dpu_internals, dist_sock)

                        # Read HK packet from N-FEE memory map
                        # see #2478 [https://github.com/IvS-KULeuven/plato-common-egse/issues/2478]

                        time.sleep(0.012)  # add 12ms to make sure HK data has been updated on the N-FEE

                        with Timer("Read and process updated HK data"):
                            hk_data, timestamp = read_updated_hk_data(self._transport)
                            process_updated_hk_data(hk_data, timestamp, storage, origin_spw_data,
                                                    self._dpu_internals.frame_number, mon_sock, dist_sock)

                        if self._dpu_internals.int_sync_cycle_dump_mode():
                            LOGGER.warning("Cycling CCD readout in internal sync")

                            # When we are in internal sync and dump mode, we need to cycle through the four CCDs, and
                            # we need an atomic block of four clear outs.

                            internals = self._dpu_internals
                            internals.current_ccd_readout_order = rotate_list(internals.current_ccd_readout_order, 1)
                            internals.cycle_count += 1
                            ccd_readout_order = _get_ccd_readout_order(
                                internals.current_ccd_readout_order, internals.ccd_id_to_bin
                            )
                            # LOGGER.info(f"{internals.current_ccd_readout_order = }, {ccd_readout_order = }, "
                            #             f"{internals.cycle_count = }")
                            _ = command_set_readout_order(self._transport, self.register_map, ccd_readout_order)

                            # We agreed to have atomic blocks of 4 clear-outs such that all four CCDs would always
                            # be dumped. So, whenever we are within one such atomic block, don't execute any DPU
                            # commands.

                            if internals.cycle_count < 4:
                                LOGGER.debug(
                                    f"[1] {internals.current_ccd_readout_order = }, {ccd_readout_order = }, "
                                    f"{internals.cycle_count = }")
                            else:
                                internals.cycle_count = 0
                                LOGGER.debug(
                                    f"[2] {internals.current_ccd_readout_order = }, {ccd_readout_order = }, "
                                    f"{internals.cycle_count = }")

                    except NoBytesReceivedError as exc:
                        # LOGGER.debug(f"No bytes received: {exc}")
                        pass
                    except NoTimeCodeError as exc:
                        LOGGER.warning("Reading the next timecode packet failed.")
                        LOGGER.debug("Traceback for NoTimecodeError:", exc_info=exc)
                    except NoHousekeepingPacketError as exc:
                        LOGGER.warning("Reading the next housekeeping packet failed.")
                        LOGGER.debug("Traceback for NoHousekeepingPacketError:", exc_info=exc)
                    except NoDataPacketError as exc:
                        LOGGER.warning("Reading the next data packet failed.")
                        LOGGER.debug("Traceback for NoDataPacketError:", exc_info=exc)
                    except TimecodeTimeoutError as exc:
                        # LOGGER.debug("Waiting for the next timecode.")
                        pass
                    except TimeExceededError as exc:
                        LOGGER.warning(
                            "Time to retrieve data packets in this readout cycle exceeded "
                            "4.0 seconds."
                        )
                        LOGGER.debug("Traceback for TimeExceededError:", exc_info=exc)
                    # FIXME:
                    #   same here as above, make sure the DPU Processor doesn't crash. This last
                    #   catching also means that Commands on the Queue will still be executed if
                    #   there is an error. What needs to be checked here is that the Command should
                    #   probably be sent in the 'safe zone' between 4.0s and 6.25s.
                    except Exception as exc:
                        LOGGER.error(exc, exc_info=True)
                        traceback.print_exc()

                    # LOGGER.info(
                    #     f"Time past after reading all packets from FEE:"
                    #     f" {time.perf_counter() - start_time:.3f}s"
                    # )

                    # Process high priority commands

                    process_high_priority_commands(
                        self._priority_q, self._response_q,
                        self._n_fee_state.get_state(), self._dpu_internals, self.register_map)

                    # When we are in internal sync dump mode, we need atomic blocks of 4 readouts such that all four
                    # CCDs will be cleared out. The cycle count goes from [0 -> 3] so, we only send commands when
                    # cycle count == 0.
                    # LOGGER.debug(f"{self._dpu_internals.cycle_count = }")
                    if self._dpu_internals.int_sync_cycle_dump_mode() and self._dpu_internals.cycle_count != 0:
                        continue

                    # Then, we might want to send some RMAP commands -------------------------------

                    # When we are in the 2s RMAP window, send the commands.
                    # Waiting till 4s have passed is apparently not needed, commands can be sent as
                    # soon as no packets will be received anymore, even if the time elapsed is
                    # less than 4s.
                    # But the following two lines might be uncommented for testing purposes.

                    # while time.perf_counter() < start_time + 4.0:
                    #     time.sleep(0.1)

                    try:
                        send_commands_to_n_fee(
                            self._transport, storage, origin_spw_data,
                            self.register_map, self._command_q, self._response_q,
                            self._dpu_internals
                        )
                    except NFEECommandError as exc:
                        # Error is already logged in the send_commands_to_n_fee() function
                        pass

                    # LOGGER.debug(
                    #     f"Time past after sending commands to FEE:"
                    #     f" {time.perf_counter() - start_time:.3f}s"
                    # )

                    # Terminate the DPU Processor when the quit event flag has been set by the
                    # commanding protocol.

                    if self._quit_event.is_set() or self._killer.term_signal_received:
                        LOGGER.info("Quit event is set, terminating..")
                        break

            except (Exception,) as exc:
                LOGGER.critical(
                    "A fatal error occurred in the DPU Processor, needs to be restarted!",
                    exc_info=exc
                )
                # re-raise the exception such that it will bubble up at a higher level.
                raise
            finally:
                LOGGER.debug("Unregistering from Storage Manager.")
                unregister_from_storage_manager(storage, origin_spw_data)

        mon_sock.close(linger=0)
        dist_sock.close(linger=0)
        # ctx.destroy()

    def quit(self):
        LOGGER.warning("Sending a Quit event to the DPU Processor.")
        self._quit_event.set()

    def initialise_register_map(self):

        # FIXME:
        #   The DPU Processor shall not crash, therefore we shall catch all Exceptions thrown.
        #   Log the exceptions as an error and continue here. It must be tested what the exact
        #   harm is when doing this and if we need some further action before proceeding.

        # The DPU Processor is only initialised properly after reading the full register from
        # the N-FEE. This can only be done within the time window we have for sending RMAP
        # commands. Therefore we need to make sure we are in a safe time window for sending
        # RMAP commands.

        LOGGER.info('Initialise Register Map from N-FEE')

        # First wait until a timecode is received

        while True:
            terminator, packet = self._transport.read_packet(timeout=200)
            if self._killer.term_signal_received:
                raise Abort("A SIGTERM signal was received for this process")
            if packet is None or len(packet) in (0, 1):
                continue
            if is_timecode(packet):
                break

        start_time = time.perf_counter()

        LOGGER.debug(f"Timecode received {packet=}")

        while time.perf_counter() < start_time + 4.2:
            terminator, packet = self._transport.read_packet(timeout=200)
            if packet is None:
                msg = f"time passed {time.perf_counter() - start_time:0.3f}"
            else:
                msg = packet[:10]
            LOGGER.debug(f"Discarding packet: {msg}")

        LOGGER.info(f"Time passed since last timecode {time.perf_counter() - start_time:0.3f}s")
        LOGGER.info(
            'In safe time window for sending RMAP command, getting full register..'
        )

        command_sync_register_map(self._transport, self.register_map)

        LOGGER.debug(self.register_map)


def save_register_map(
        reg_map: RegisterMap, storage: StorageProxy, origin: str, dist_socket: zmq.Socket):

    reg_memory_map = reg_map.get_memory_map_as_ndarray()

    LOGGER.debug("Saving register map")

    response = storage.save(
        {
            "origin": origin,
            "data": {
                "/register/": reg_memory_map
            }
        }
    )

    LOGGER.debug(f"Response from saving Register Map: {response}")

    pickle_string = pickle.dumps(reg_memory_map)
    msg_id = MessageIdentifier.N_FEE_REGISTER_MAP.to_bytes(1, 'big')
    dist_socket.send_multipart([msg_id, pickle_string])
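
`save_register_map()` also publishes the raw register memory map on the data-distribution socket as a two-part ZeroMQ message: the message identifier followed by a pickled ndarray. A minimal sketch of the receiving side; it reuses this module's own `connect_address()`, `MessageIdentifier` and `DPU_PROCESSOR_SETTINGS` names, which are assumed to be in scope:

```python
# Sketch: consume the N_FEE_REGISTER_MAP messages published above.
import pickle

import zmq

context = zmq.Context.instance()
socket = context.socket(zmq.SUB)
socket.connect(connect_address('tcp', DPU_PROCESSOR_SETTINGS.HOSTNAME,
                               DPU_PROCESSOR_SETTINGS.DATA_DISTRIBUTION_PORT))
socket.subscribe(MessageIdentifier.N_FEE_REGISTER_MAP.to_bytes(1, 'big'))

msg_id, pickle_string = socket.recv_multipart()
reg_memory_map = pickle.loads(pickle_string)     # ndarray with the N-FEE register memory map
print(int.from_bytes(msg_id, 'big'), reg_memory_map.shape)
```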
|
|
1869
|
-
|
|
1870
|
-
|
|
1871
|
-
def register_to_storage_manager(proxy: StorageProxy, origin: str):
|
|
1872
|
-
rc = proxy.new_registration(
|
|
1873
|
-
item={
|
|
1874
|
-
"origin": origin,
|
|
1875
|
-
"persistence_class": HDF5,
|
|
1876
|
-
"prep": {
|
|
1877
|
-
"mode": "w-",
|
|
1878
|
-
},
|
|
1879
|
-
},
|
|
1880
|
-
use_counter=True
|
|
1881
|
-
)
|
|
1882
|
-
LOGGER.info(f"{rc=!s}")
|
|
1883
|
-
if rc and not rc.successful:
|
|
1884
|
-
LOGGER.warning(f"Couldn't register to the Storage manager: {rc}")
|
|
1885
|
-
|
|
1886
|
-
|
|
1887
|
-
def unregister_from_storage_manager(proxy: StorageProxy, origin: str):
|
|
1888
|
-
|
|
1889
|
-
try:
|
|
1890
|
-
rc = proxy.unregister({"origin": origin})
|
|
1891
|
-
if not rc.successful:
|
|
1892
|
-
LOGGER.warning(f"Couldn't unregister from the Storage manager: {rc}")
|
|
1893
|
-
|
|
1894
|
-
except ConnectionError as exc:
|
|
1895
|
-
LOGGER.warning(f"Couldn't connect to the Storage manager for de-registration: {exc}")
|
|
1896
|
-
|
|
1897
|
-
|
|
-def new_spw_data_file(
-        proxy: StorageProxy, reg_map: RegisterMap, origin: str, data_type: Type[PersistenceLayer],
-        mon_socket: zmq.Socket, dist_socket: zmq.Socket
-):
-    """
-    Open a new data file to store CCD data.
-
-    Args:
-        - proxy: Storage manager.
-        - origin: the origin for which to create a new file
-        - reg_map: Register map.
-    """
-
-    LOGGER.debug(f"Create a new data file for {origin} in the Storage")
-
-    # prep = {
-    #     "expected_last_packet_flags": get_expected_last_packet_flags(reg_map),
-    # }
-    #
-    # for name in CRUCIAL_REGISTER_PARAMETERS:
-    #     prep[name] = reg_map[name]
-
-    item = {
-        "origin": origin,
-        "persistence_class": data_type,
-        "prep": {},
-    }
-
-    # Retrieve the current filenames that will be available for processing as soon as the new
-    # HDF5 file is registered and created by the storage manager. This should be done, of course,
-    # before the new-registration call!
-
-    hdf5_filenames = proxy.get_filenames(item={"origin": origin})
-
-    response = proxy.new_registration(item=item, use_counter=True)
-
-    LOGGER.debug(f"Response from new_registration: {response}")
-
-    save_format_version(proxy, origin)
-
-    # Save the Register Map that is used for the current readout cycle
-
-    save_register_map(reg_map, proxy, origin, dist_socket)
-
-    LOGGER.info(f"HDF5 files ready for processing: {hdf5_filenames=}")
-
-    pickle_string = pickle.dumps(hdf5_filenames)
-    msg_id = MessageIdentifier.HDF5_FILENAMES.to_bytes(1, 'big')
-    mon_socket.send_multipart([msg_id, pickle_string])
-
-
-def save_format_version(proxy: StorageProxy, origin: str):
-
-    # 2.0 - introduced the format_version
-    # 2.1 - Added obsid as a dataset to the HDF5 file
-    # 2.2 - Multiple commands can now be saved under the same frame number
-    # 2.3 - introduced /dpu/num_cycles attribute
-    # 2.4 - introduced /dpu/slicing_num_cycles attribute
-    # 2.5 - introduced /{frame number}/hk_data dataset
-
-    major_version = 2
-    minor_version = 5
-
-    item_data = {
-        "/versions/format_version/": "format version of HDF5 file",
-        "/versions/format_version:ATTRS": [
-            ("major_version", major_version),
-            ("minor_version", minor_version)
-        ]
-    }
-    item = {
-        "origin": origin,
-        "data": item_data,
-    }
-    response = proxy.save(item)
-    LOGGER.debug(f"Response from saving format_version: {response}")
-
-
-def save_obsid(proxy: StorageProxy, origin: str, obsid: ObservationIdentifier):
-
-    item_data = {
-        "/obsid": str(obsid),
-    }
-    item = {
-        "origin": origin,
-        "data": item_data,
-    }
-    response = proxy.save(item)
-    LOGGER.debug(f"Response from saving OBSID: {response}")
-
-
-def save_num_cycles(proxy: StorageProxy, origin: str, num_cycles: int):
-    """Save the number of cycles to the storage. This will only save if num_cycles >= 0."""
-
-    # Only save num_cycles >= 0, the DPU Processor understands when num_cycles is negative,
-    # but for the HDF5 file we want to keep it clean and always have num_cycles >= 0.
-
-    num_cycles = max(num_cycles, 0)
-
-    item_data = {
-        "/dpu/": "DPU specific parameters",
-        "/dpu/:ATTRS": [
-            ("num_cycles", num_cycles),
-        ]
-    }
-    item = {
-        "origin": origin,
-        "data": item_data,
-    }
-    response = proxy.save(item)
-    LOGGER.debug(f"Response from saving NUM_CYCLES: {response}")
-
-
-def save_slicing_parameter(proxy: StorageProxy, origin: str, slicing_num_cycles: int):
-    """Save the number of cycles to use for slicing to the storage."""
-
-    item_data = {
-        "/dpu/:ATTRS": [
-            ("slicing_num_cycles", slicing_num_cycles),
-        ]
-    }
-    item = {
-        "origin": origin,
-        "data": item_data,
-    }
-    response = proxy.save(item)
-    LOGGER.debug(f"Response from saving SLICING_NUM_CYCLES: {response}")
-
-
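All of the `save_*` helpers above go through `StorageProxy.save()` with the same small convention in the `data` dictionary: a key ending in `/` creates a group (its value becomes a description), a plain key becomes a dataset, and a `<key>:ATTRS` entry attaches a list of `(name, value)` attribute pairs. A hedged sketch of reading such a file back with h5py, assuming the HDF5 persistence layer maps the keys one-to-one onto groups, datasets and attributes (the filename is a placeholder):

```python
import h5py

# Assumption: the HDF5 persistence class writes "/versions/format_version", "/dpu"
# and "/obsid" exactly as the keys above suggest; the filename below is made up.
with h5py.File("N-FEE_SPW_00001.hdf5", "r") as hdf5_file:
    fmt = hdf5_file["/versions/format_version"]
    print(fmt.attrs["major_version"], fmt.attrs["minor_version"])   # e.g. 2 5

    dpu = hdf5_file["/dpu"]
    print(dpu.attrs.get("num_cycles"), dpu.attrs.get("slicing_num_cycles"))

    if "obsid" in hdf5_file:
        print(hdf5_file["/obsid"][()])   # the observation identifier saved by save_obsid()
```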
-def update_data_attributes(attr: dict, n_fee_state: NFEEState.StateTuple) -> Dict[str, Any]:
-    """
-    Collect parameter/value pairs that will be added to the data group as attributes.
-
-    Args:
-        attr (dict): the current attributes that need to be updated
-        n_fee_state: the current state of the N-FEE
-
-    Returns:
-        Updated data attributes.
-    """
-
-    attr.update(n_fee_state._asdict())
-    return attr
-
-
-def read_timecode(transport: SpaceWireInterface) -> (TimecodePacket, str, float):
-    """
-    Reads the next Timecode packet from the N-FEE.
-
-    Args:
-        transport: the SpaceWire interfaces that is used for communication to the N-FEE
-
-    Returns:
-        The timecode and associated timestamp, and the approximate start time for this readout cycle.
-    Raises:
-        NoTimecodeError when the timecode could not be read.
-    """
-    terminator, packet = transport.read_packet(timeout=100)
-    timestamp = format_datetime()
-
-    if terminator is None and packet is None:
-        raise TimecodeTimeoutError()
-
-    # Start time taken as closely as possible to timecode reception, this start_time is
-    # returned to be used in further functions called in the outer loop.
-
-    start_time = time.perf_counter()
-    # LOGGER.debug(f"Time set: {start_time}")
-
-    bytes_received = len(packet)
-
-    # The following check is to cope with loss of connection when either the
-    # FEE simulator crashes or the connection dropped for some other reason.
-    # We will receive one packet with 0 or 1 bytes.
-
-    if bytes_received in {0, 1}:
-        raise NoBytesReceivedError(f"{bytes_received} bytes received, lost connection to FEE?")
-
-    if not is_timecode(packet):
-        packet = SpaceWirePacket.create_packet(packet)
-        raise NoTimeCodeError(f"Expected Timecode Packet, but got {packet.__class__.__name__}")
-
-    tc_packet: TimecodePacket = SpaceWirePacket.create_packet(packet)
-
-    LOGGER.info(f"Timecode received: {tc_packet.timecode}")
-
-    return tc_packet, timestamp, start_time
-
-
-def process_timecode(tc_packet: TimecodePacket, timestamp: str,
-                     storage: StorageProxy, origin_spw_data: str, frame_number: int,
-                     mon_socket: zmq.Socket, dist_socket: zmq.Socket):
-    """
-    Saves the timecode and associated timestamp for this frame. The timecode and timestamp
-    are also published on the monitoring and data distribution message queue.
-
-    Args:
-        tc_packet: the timecode packet
-        timestamp: a timestamp associated with the reception of the timecode
-        frame_number: the current frame number
-        storage: the proxy that is used to communicate with the Storage manager
-        origin_spw_data: the registration identifier for the Storage manager, for the SpW data
-        mon_socket: the ZeroMQ socket to which monitoring sync signals are sent
-        dist_socket: the ZeroMQ socket to which SpW data is sent (for real-time view)
-
-    Returns:
-        Nothing.
-    """
-    LOGGER.debug(f"Saving timecode packet: {tc_packet.timecode=}, {frame_number=}")
-
-    response = storage.save(
-        {
-            "origin": origin_spw_data,
-            "data":
-                {
-                    f"/{frame_number}/timecode": tc_packet,
-                    f"/{frame_number}/timecode:ATTRS": [("timestamp", timestamp)],
-                }
-        }
-    )
-
-    LOGGER.debug(f"Response from saving Timecode: {response}")
-
-    pickle_string = pickle.dumps((tc_packet.timecode, timestamp))
-    mon_socket.send_multipart([MessageIdentifier.SYNC_TIMECODE.to_bytes(1, "big"), pickle_string])
-    dist_socket.send_multipart([MessageIdentifier.SYNC_TIMECODE.to_bytes(1, "big"), pickle_string])
-
-
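`process_timecode()` publishes the timecode on both sockets as a two-part ZeroMQ message: a one-byte `MessageIdentifier` followed by a pickled payload. A hedged sketch of the consuming side, assuming a SUB socket connected to the monitoring endpoint (the address is a placeholder; only the multipart layout is taken from the code above):

```python
import pickle
import zmq

context = zmq.Context.instance()
socket = context.socket(zmq.SUB)
socket.connect("tcp://localhost:30001")    # placeholder endpoint for the monitoring socket
socket.setsockopt(zmq.SUBSCRIBE, b"")      # subscribe to all message identifiers

# Layout matches the send_multipart() calls above: [1-byte id, pickled payload].
msg_id, payload = socket.recv_multipart()
sync_id = int.from_bytes(msg_id, "big")
timecode, timestamp = pickle.loads(payload)   # payload layout used for SYNC_TIMECODE
print(f"{sync_id=}, {timecode=}, {timestamp=}")
```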
-def read_updated_hk_data(transport: SpaceWireInterface) -> (HousekeepingData, str):
-    """
-    Reads the memory map that contains the housekeeping information from the N-FEE.
-    The memory map is returned as a HousekeepingData object.
-
-    This is not the same as a housekeeping packet that is read from the N-FEE. For
-    that refer to the function `read_hk_packet()`.
-
-    Args:
-        transport: the SpaceWire interfaces that is used for communication to the N-FEE
-
-    Returns:
-        The HK data packet and its associated timestamp as a string.
-    """
-    timestamp = format_datetime()
-
-    data = command_get_hk_information(transport, None, 0x000_0700, 0x90)
-    hk_data = HousekeepingData(data)
-
-    msg = f"Updated housekeeping retrieved... {hk_data.frame_counter = }, {hk_data.timecode = }"
-    if hk_data.error_flags:
-        msg += f", error_flags = 0b{hk_data.error_flags:032b}"
-        LOGGER.warning(msg)
-    else:
-        LOGGER.info(msg)
-
-    return hk_data, timestamp
-
-def process_updated_hk_data(hk_data: HousekeepingData, timestamp: str, storage: StorageProxy,
-                            origin: str, frame_number: int,
-                            mon_socket: zmq.Socket, dist_socket: zmq.Socket):
-    """
-    Saves the housekeeping data and associated timestamp for this frame. The data and timestamp
-    are also published on the monitoring message queue.
-
-    Args:
-        hk_data: the HousekeepingData object
-        timestamp: the timestamp associated with the reception of this data
-        frame_number: the current frame number
-        storage: the proxy that is used to communicate with the Storage manager
-        origin: the registration identifier for the Storage manager
-        mon_socket: the ZeroMQ socket to which monitoring sync signals are sent
-        dist_socket: the ZeroMQ socket to which SpW data is sent (for real-time view)
-
-    Returns:
-        Nothing.
-    """
-    LOGGER.debug(f"Saving updated Housekeeping data: {hk_data.frame_counter = }, {hk_data.timecode = }, {hk_data.frame_number = }")
-
-    response = storage.save(
-        {
-            "origin": origin,
-            "data":
-                {
-                    f"/{frame_number}/hk_data": hk_data,
-                    f"/{frame_number}/hk_data:ATTRS": [("timestamp", timestamp)],
-                }
-        }
-    )
-
-    LOGGER.debug(f"Response from saving updated Housekeeping data: {response}")
-
-    pickle_string = pickle.dumps((hk_data.error_flags, hk_data.frame_counter, timestamp))
-    mon_socket.send_multipart([MessageIdentifier.SYNC_ERROR_FLAGS.to_bytes(1, "big"), pickle_string])
-
-    msg_id = MessageIdentifier.SYNC_HK_DATA.to_bytes(1, 'big')
-
-    pickle_string = pickle.dumps((hk_data, timestamp))
-    dist_socket.send_multipart([msg_id, pickle_string])
-
-
-def read_hk_packet(transport: SpaceWireInterface) -> (HousekeepingPacket, str):
-    """
-    Read the next Housekeeping Packet from the N-FEE.
-
-    Args:
-        transport: the SpaceWire interfaces that is used for communication to the N-FEE
-    Raises:
-        NoHousekeepingPacketError when the next packet is not a `HousekeepingPacket`.
-    Returns:
-        the received housekeeping packet and the timestamp.
-    """
-    terminator, packet = transport.read_packet()
-    timestamp = format_datetime()
-
-    if not is_hk_data_packet(packet):
-        packet = SpaceWirePacket.create_packet(packet)
-        raise NoHousekeepingPacketError(
-            f"Expected a Housekeeping packet, but got {packet.__class__.__name__}")
-
-    hk_packet: HousekeepingPacket = SpaceWirePacket.create_packet(packet)
-
-    LOGGER.info(f"Housekeeping Packet received: {hk_packet.type!s}")
-
-    return hk_packet, timestamp
-
-
-def process_hk_packet(hk_packet: HousekeepingPacket, timestamp: str,
-                      storage: StorageProxy, origin: str, frame_number: int,
-                      mon_socket: zmq.Socket, dist_socket: zmq.Socket):
-    """
-    Saves the housekeeping packet and associated timestamp for this frame. The data and timestamp
-    are also published on the monitoring and data distribution message queue.
-
-    Args:
-        hk_packet: the HousekeepingPacket
-        timestamp: the timestamp associated with the reception of this packet
-        frame_number: the current frame number
-        storage: the proxy that is used to communicate with the Storage manager
-        origin: the registration identifier for the Storage manager
-        mon_socket: the ZeroMQ socket to which monitoring sync signals are sent
-        dist_socket: the ZeroMQ socket to which SpW data is sent (for real-time view)
-
-    Returns:
-        Nothing.
-    """
-
-    LOGGER.debug(f"Saving Housekeeping packet: {hk_packet.type!s}, "
-                 f"frame counter={hk_packet.frame_counter}, "
-                 f"sequence counter={hk_packet.sequence_counter}")
-
-    response = storage.save(
-        {
-            "origin": origin,
-            "data":
-                {
-                    f"/{frame_number}/hk": hk_packet,
-                }
-        }
-    )
-
-    LOGGER.debug(f"Response from saving HK Packet: {response}")
-
-    msg_id = MessageIdentifier.SYNC_HK_PACKET.to_bytes(1, 'big')
-
-    pickle_string = pickle.dumps((hk_packet.type, timestamp))
-    mon_socket.send_multipart([msg_id, pickle_string])
-
-    pickle_string = pickle.dumps((hk_packet, timestamp))
-    dist_socket.send_multipart([msg_id, pickle_string])
-
-
-def read_and_process_data_packets(
-        transport: SpaceWireInterface, storage: StorageProxy, origin_spw_data: str,
-        start_time: float, mode: int, register_map: RegisterMap, data_attr: dict,
-        internals: DPUInternals, dist_socket: zmq.Socket
-):
-    """
-    Read the data packets when they are available depending on the mode.
-
-    Args:
-        transport: the SpaceWire interfaces that is used for communication to the N-FEE
-        storage: the proxy that is used to communicate with the Storage manager
-        origin_spw_data: the registration identifier for the Storage manager
-        start_time: the approximate time that the readout cycle started
-        mode: FPGA mode
-        register_map: the DPU Processor's copy of the N-FEE register map
-        data_attr: register values to be saved with the data
-        internals: use for expected_last_packet_flags (these will be updated within this function)
-        dist_socket: the ZeroMQ socket to which SpW data is sent (for real-time view)
-
-    Raises:
-        NoDataPacketError when the expected packet is not a data packet.
-    """
-
-    if internals.dump_mode or mode not in (
-            n_fee_mode.FULL_IMAGE_PATTERN_MODE,
-            n_fee_mode.WINDOWING_PATTERN_MODE,
-            n_fee_mode.FULL_IMAGE_MODE,
-    ):
-        return
-
-    timestamp = format_datetime()
-    msg_id = MessageIdentifier.SYNC_DATA_PACKET.to_bytes(1, 'big')
-
-    data_count = 0
-
-    # Initialise the flags that determine if the last packet has arrived for
-    # all expected data packets.
-
-    actual_last_packet_flags = [False, False, False, False]
-
-    # Read the data, until all the expected last packet bits are set.
-    # This should be within next 4 seconds.
-
-    LOGGER.info("Reading data packets....")
-
-    terminator, packet = transport.read_packet()
-
-    data_packet: DataPacket = SpaceWirePacket.create_packet(packet)
-
-    if (not isinstance(data_packet, DataDataPacket) and
-            not isinstance(data_packet, OverscanDataPacket)):
-        LOGGER.critical(f"DataPacket expected, got {data_packet}")
-        raise NoDataPacketError(f"Expected a data packet, but got {data_packet.__class__.__name__}")
-
-    # LOGGER.debug(f"Got data packet of length {len(packet)}")
-
-    LOGGER.debug(f"Saving data packets: {data_packet.type!s}")
-
-    item_data = {f"/{internals.frame_number}/data/{data_count}": data_packet}
-
-    attrs = [(k, v) for k, v in data_attr.items()]
-    item_data.update({f"/{internals.frame_number}/data:ATTRS": attrs})
-
-    response = storage.save(
-        {
-            "origin": origin_spw_data,
-            "data": item_data
-        }
-    )
-
-    if isinstance(response, Exception):
-        LOGGER.warning(f"Response from saving data packet: {response}")
-
-    pickle_string = pickle.dumps((data_packet, timestamp))
-    dist_socket.send_multipart([msg_id, pickle_string])
-
-    data_count += 1
-
-    # Update the expected flags with the possibly new register values, but only
-    # after we had the 400ms pulse which updates the settings from the
-    # register map. The test is needed because the register here on the DPU
-    # processing side can be updated also on 200ms sync pulses, but the changes
-    # only take effect on the 400ms pulse in the N-FEE.
-
-    if data_packet.type.frame_number == 0:
-        internals.expected_last_packet_flags = get_expected_last_packet_flags(register_map, internals.sensor_sel_enum)
-        LOGGER.debug(f"{internals.expected_last_packet_flags=}")
-
-    idx = get_index_for_last_packet_flags(data_packet.type.packet_type, data_packet.type.ccd_side,
-                                          internals.ccd_sides_enum)
-    # LOGGER.debug(f"{idx=}, {data_packet.type.packet_type=}, {data_packet.type.ccd_side=}")
-    actual_last_packet_flags[idx] = data_packet.type.last_packet
-
-    while not got_all_last_packets(
-            actual_last_packet_flags, internals.expected_last_packet_flags):
-
-        terminator, packet = transport.read_packet()
-        data_packet: DataPacket = SpaceWirePacket.create_packet(packet)
-
-        if (not isinstance(data_packet, DataDataPacket) and
-                not isinstance(data_packet, OverscanDataPacket)):
-            LOGGER.critical(f"DataPacket expected, got {data_packet}")
-            raise NoDataPacketError(
-                f"Expected a data packet, but got {data_packet.__class__.__name__}")
-
-        # LOGGER.debug(f"Saving data packet: {data_packet.type!s}")
-
-        response = storage.save(
-            {
-                "origin": origin_spw_data,
-                "data":
-                    {
-                        f"/{internals.frame_number}/data/{data_count}": data_packet,
-                    }
-            }
-        )
-
-        if isinstance(response, Exception):
-            LOGGER.warning(f"Response from saving data packet: {response}")
-
-        pickle_string = pickle.dumps((data_packet, timestamp))
-        dist_socket.send_multipart([msg_id, pickle_string])
-
-        data_count += 1
-
-        # LOGGER.debug(f"Got data packet of length {len(packet)}")
-        # LOGGER.debug(f"DataPacketHeader: {data_packet.header.type_as_object}")
-
-        idx = get_index_for_last_packet_flags(data_packet.type.packet_type, data_packet.type.ccd_side,
-                                              internals.ccd_sides_enum)
-        actual_last_packet_flags[idx] = data_packet.type.last_packet
-
-        # Sending data packets shall not take more than 4 seconds, and if we
-        # wait longer than 6.25 seconds, all RMAP commands that we send will be
-        # discarded.
-
-        if time.perf_counter() > start_time + 5.25:
-            raise TimeExceededError(
-                "Retrieving data packets exceeded the allowed 4.0 seconds, "
-                "breaking out of the data loop."
-            )
-
-
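Taken together, the `read_*` / `process_*` helpers above suggest the shape of one readout cycle: timecode first, then the updated HK memory map, the HK packet, and finally the data packets until all last-packet flags have been seen. The following is a hedged outline only, not the actual DPU Processor loop; every argument is assumed to have been prepared by the caller:

```python
# Hedged outline of one readout cycle built from the helpers above. The caller is
# assumed to provide the transport, storage proxy, sockets, internals, register
# map, the current FPGA mode, the data attributes and the origin string.
def handle_one_readout_cycle(transport, storage, origin, internals, register_map,
                             mode, data_attr, mon_socket, dist_socket):
    tc_packet, tc_timestamp, start_time = read_timecode(transport)
    process_timecode(tc_packet, tc_timestamp, storage, origin,
                     internals.frame_number, mon_socket, dist_socket)

    hk_data, hk_timestamp = read_updated_hk_data(transport)
    process_updated_hk_data(hk_data, hk_timestamp, storage, origin,
                            internals.frame_number, mon_socket, dist_socket)

    hk_packet, pkt_timestamp = read_hk_packet(transport)
    process_hk_packet(hk_packet, pkt_timestamp, storage, origin,
                      internals.frame_number, mon_socket, dist_socket)

    # Only reads image/overscan data when the mode and dump settings call for it.
    read_and_process_data_packets(transport, storage, origin, start_time, mode,
                                  register_map, data_attr, internals, dist_socket)
```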
-def send_commands_to_n_fee(
-        transport: SpaceWireInterface, storage: StorageProxy, origin: str,
-        register_map: RegisterMap,
-        command_q: multiprocessing.Queue,
-        response_q: multiprocessing.Queue,
-        internals: DPUInternals
-):
-    """
-    Send RMAP commands to the N-FEE. The commands are read from the command queue that is shared
-    with the DPU Controller. The response from the N-FEE is put on the response queue, also shared
-    with the DPU Controller.
-
-    !!! note
-        The current implementation allows only one command from the command queue per sync cycle.
-
-    Args:
-        transport: the SpaceWire interfaces that is used for communication to the N-FEE
-        storage: the proxy that is used to communicate with the Storage manager
-        origin: the registration identifier for the Storage manager
-        register_map: the DPU Processor's copy of the N-FEE register map
-        command_q: the command queue
-        response_q: the response queue
-        internals: for some commands we need access to DPUInternals, e.g. num_cycles, dump_mode_int
-
-    Raises:
-        Exceptions are caught and put on the response queue.
-    """
-
-    if internals.clear_error_flags:
-        LOGGER.debug("Set the clear-error-flags register parameter.")
-        try:
-            _ = command_set_clear_error_flags(transport, register_map)
-        except ValueError as exc:
-            LOGGER.error("The clear-error-flags register parameter could not be set due to a ValueError.", exc_info=exc)
-
-        internals.clear_error_flags = False
-
-    command = response = None
-    kwargs = {}
-    try:
-        (command, args, kwargs) = command_q.get_nowait()
-
-        # When num_cycles is not specified, don't even set it to 0, the N-FEE will stay in the
-        # current configuration until commanded otherwise.
-
-        if num_cycles := kwargs.get("num_cycles"):
-            LOGGER.debug(f"Set internals.num_cycle to {num_cycles}.")
-            internals.num_cycles = num_cycles
-
-        # Some commanding requires to go back into internal sync dump mode
-
-        dump_mode_int = kwargs.get("dump_mode_int", False)
-        LOGGER.debug(f"Set internals.dump_mode_int to {dump_mode_int}.")
-        internals.dump_mode_int = dump_mode_int
-
-        LOGGER.debug(f"Executing Command: {command.__name__}, {args=}")
-        response = command(transport, register_map, *args)
-        LOGGER.debug(f"Command executed: {command.__name__}, {args=}, {response=}")
-
-        LOGGER.debug(f"Saving command: {command.__name__}, {args=}")
-
-        response_save = storage.save({
-            "origin": origin,
-            "data": {
-                f"/{internals.frame_number}/command/": f"{command.__name__}, {args=}, {kwargs=}",
-            }
-        })
-
-        LOGGER.debug(f"Response from saving Command: {response_save}")
-
-    except queue.Empty:
-        pass
-    except (Exception,) as exc:
-        LOGGER.error(
-            f"Exception during command execution in DPU Processor: "
-            f"{command}", exc_info=exc
-        )
-        raise NFEECommandError(
-            f"An exception occurred sending the command {command} "
-            f"to the N-FEE.") from exc
-    finally:
-        if command is not None and kwargs.get('response', True):
-            response_q.put((command, response))
-
-
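`send_commands_to_n_fee()` drains at most one `(command, args, kwargs)` tuple per sync cycle from the shared queue and pushes `(command, response)` back on the response queue. A sketch of the producing side with a placeholder command function; the real command functions and their registration live elsewhere in the package:

```python
import multiprocessing

command_q: multiprocessing.Queue = multiprocessing.Queue()
response_q: multiprocessing.Queue = multiprocessing.Queue()


def command_example(transport, register_map, *args):
    """Placeholder with the (transport, register_map, *args) call signature used above."""
    return f"executed with {args=}"


# Tuple layout matches what send_commands_to_n_fee() unpacks: (command, args, kwargs).
# "num_cycles" and "response" are the kwargs interpreted in the function above.
command_q.put((command_example, (1, 2), {"num_cycles": 5, "response": True}))

# Once the DPU Processor has executed the command, the result can be collected:
# command, response = response_q.get(timeout=10)
```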
-def process_high_priority_commands(
-        priority_q: multiprocessing.Queue,
-        response_q: multiprocessing.Queue,
-        n_fee_state: tuple, dpu_internals: DPUInternals, reg_map: RegisterMap):
-    """
-    Execute high priority commands from the DPU Control Server / Controller. The `n_fee_state` and
-    the `dpu_internals` tuples are passed to the high priority commands before any other arguments
-    that were passed on the command queue.
-
-    Args:
-        priority_q: the command queue with priority
-        response_q: the response queue
-        n_fee_state: a namedtuple containing the current state of the N-FEE
-        dpu_internals: the internal settings of the DPU might be requested or set
-            by a high priority command
-        reg_map: the current register map from the DPU Processor
-    """
-
-    command = response = None
-    try:
-        (command, args) = priority_q.get_nowait()
-        response = command(n_fee_state, dpu_internals, reg_map, *args)
-        LOGGER.debug(f"Command executed: {command.__name__}, {args=}, {response=}")
-    except queue.Empty:
-        pass
-    except (Exception,) as exc:
-        LOGGER.error(
-            f"Exception during command execution in DPU Processor: "
-            f"{command}", exc_info=exc
-        )
-        raise NFEECommandError(
-            f"An exception occurred sending the command {command} "
-            f"to the N-FEE.") from exc
-    finally:
-        if command is not None:
-            response_q.put((command, response))
-
-
-def get_index_for_last_packet_flags(packet_type: int, ccd_side: int, ccd_sides_enum):
-    """
-    Returns the index into the last packet flags list.
-
-    The last packet flags list is organised as follows:
-
-    * index 0: data packet, E-side
-    * index 1: data packet, F-side
-    * index 2: overscan data packet, E-side
-    * index:3: overscan data packet, F-side
-
-    Args:
-        packet_type: the packet type as read from the packet header [datapacket=0, overscan=1,
-            housekeeping=2]
-        ccd_side: the ccd side as read from the packet header
-        ccd_sides_enum: Enumeration with information on E and F
-
-    Returns:
-        The index for the last packet flags list.
-    """
-    if ccd_side == ccd_sides_enum.E_SIDE.value:
-        return packet_type * 2
-    else:
-        return packet_type * 2 + 1
-
-
-def get_expected_last_packet_flags(register_map: Mapping, sensor_sel_enum: Enum) -> List[bool]:
-    """
-    Build and returns a list of flags that define if a last packet is expected.
-
-    A last packet flag is expected for normal data packets and overscan data
-    packets. For both these data packets we can expect E-side and F-side packets
-    with a last packet flag. That brings the total expected flags to four. This
-    function examines the register values `v_start`, `v_end`, and `sensor_sel`.
-
-    The flags are ordered as follows:
-
-    1. data packet and E-side
-    2. data packet and F-side
-    3. overscan data packet and E-side
-    4. overscan data packet and F-side
-
-    Housekeeping packets are not considered here.
-
-    For comparing the flags with the actual data,
-    use the function `got_all_last_packets(actual, expected)`.
-
-    Args:
-        register_map: the current Register map for the N-FEE
-        sensor_sel_enum:
-    Returns:
-        a list of flags.
-    """
-    sensor_sel_from_register = register_map["sensor_sel"]
-
-    e_side = bool(sensor_sel_from_register & sensor_sel_enum.E_SIDE)
-    f_side = bool(sensor_sel_from_register & sensor_sel_enum.F_SIDE)
-    v_start = register_map["v_start"]
-    v_end = register_map["v_end"]
-    data_packet = v_start < 4510
-    overscan_packet = v_end > 4509
-
-    return [
-        data_packet and e_side,
-        data_packet and f_side,
-        overscan_packet and e_side,
-        overscan_packet and f_side
-    ]
-
-def create_expected_last_packet_flags(n_fee_state: NFEEState.StateTuple, sensor_sel_enum: Enum):
-    """
-    Build and returns a list of flags that define if a last packet is expected.
-
-    A last packet flag is expected for normal data packets and overscan data
-    packets. For both these data packets we can expect E-side and F-side packets
-    with a last packet flag. That brings the total expected flags to four. This
-    function examines the register values `v_start`, `v_end`, and `sensor_sel`.
-
-    The flags are ordered as follows:
-
-    1. data packet and E-side
-    2. data packet and F-side
-    3. overscan data packet and E-side
-    4. overscan data packet and F-side
-
-    Housekeeping packets are not considered here.
-
-    For comparing the flags with the actual data,
-    use the function `got_all_last_packets(actual, expected)`.
-
-    Args:
-        n_fee_state: a namedtuple containing the current N-FEE State
-        sensor_sel_enum: Enumeration with the sensor_sel
-    Returns:
-        a list of flags.
-    """
-    sensor_sel_from_nfee_state = n_fee_state.sensor_sel
-
-    v_start = n_fee_state.v_start
-    v_end = n_fee_state.v_end
-
-    e_side = bool(sensor_sel_from_nfee_state & sensor_sel_enum.E_SIDE)
-    f_side = bool(sensor_sel_from_nfee_state & sensor_sel_enum.F_SIDE)
-    data_packet = v_start < 4510
-    overscan_packet = v_end > 4509
-
-    return [
-        data_packet and e_side,
-        data_packet and f_side,
-        overscan_packet and e_side,
-        overscan_packet and f_side
-    ]
-
-
-def got_all_last_packets(actual, expected):
-    """
-    Returns True if all the expected last packet flags have been seen.
-
-    Args:
-        actual: the flags that have been seen so far
-        expected: the expected flags
-
-    Returns:
-        True if 'actual' matches 'expected', False otherwise.
-    """
-    rc = all([x == y for (x, y) in zip(actual, expected)])
-    # LOGGER.info(f"{expected=}, {actual=}, {rc=}")
-    return rc
-
-
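A small, self-contained illustration of the last-packet bookkeeping above: build the expected flags from a register map and compare them with what has actually been seen. The enum values are assumptions chosen so that the E and F sides combine as bit flags, mirroring the `&` tests in `get_expected_last_packet_flags()`:

```python
from enum import IntEnum


class SENSOR_SEL(IntEnum):   # illustrative values, not the package's own enumeration
    E_SIDE = 0b01
    F_SIDE = 0b10
    BOTH_SIDES = 0b11


register_map = {"sensor_sel": SENSOR_SEL.BOTH_SIDES, "v_start": 0, "v_end": 4539}

expected = get_expected_last_packet_flags(register_map, SENSOR_SEL)
print(expected)                                  # [True, True, True, True]

actual = [True, True, True, False]               # overscan F-side last packet not seen yet
print(got_all_last_packets(actual, expected))    # False -> keep reading data packets
```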
-if __name__ == "__main__":
-
-    def do_something(idx):
-        LOGGER.info(f"Hello! {idx=}")
-
-    moni = DPUMonitoring()
-    moni.connect()
-
-    for idx in range(3):
-        # moni.on_long_pulse_do(do_something, idx)
-
-        # timecode, timestamp = moni.wait_for_timecode()
-        # LOGGER.info(f"{timecode=}, {timestamp=}")
-
-        filenames = moni.wait_for_hdf5_filename()
-        LOGGER.info(f"{filenames=}")
-
-    moni.disconnect()