legend-daq2lh5 1.6.1.tar.gz → 1.6.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/PKG-INFO +2 -2
  2. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/pyproject.toml +1 -1
  3. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/_version.py +2 -2
  4. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/cli.py +12 -0
  5. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/compass/compass_event_decoder.py +50 -42
  6. legend_daq2lh5-1.6.3/src/daq2lh5/compass/compass_header_decoder.py +247 -0
  7. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/compass/compass_streamer.py +39 -33
  8. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_config_decoder.py +6 -0
  9. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_event_decoder.py +35 -18
  10. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_eventheader_decoder.py +8 -5
  11. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_fsp_decoder.py +14 -9
  12. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_status_decoder.py +1 -1
  13. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_streamer.py +2 -1
  14. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_fcio.py +122 -83
  15. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_streamer.py +16 -7
  16. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/PKG-INFO +2 -2
  17. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/requires.txt +1 -1
  18. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/compass/conftest.py +1 -2
  19. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/compass/test_compass_header_decoder.py +2 -0
  20. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/fc/test_fc_streamer.py +5 -1
  21. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/orca/test_orca_fcio.py +1 -1
  22. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/orca/test_orca_to_raw.py +69 -0
  23. legend_daq2lh5-1.6.3/tests/test_cli.py +56 -0
  24. legend_daq2lh5-1.6.1/src/daq2lh5/compass/compass_header_decoder.py +0 -74
  25. legend_daq2lh5-1.6.1/tests/test_cli.py +0 -27
  26. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/LICENSE +0 -0
  27. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/README.md +0 -0
  28. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/setup.cfg +0 -0
  29. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/__init__.py +0 -0
  30. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/buffer_processor/__init__.py +0 -0
  31. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/buffer_processor/buffer_processor.py +0 -0
  32. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/buffer_processor/lh5_buffer_processor.py +0 -0
  33. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/build_raw.py +0 -0
  34. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/compass/__init__.py +0 -0
  35. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/compass/compass_config_parser.py +0 -0
  36. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/data_decoder.py +0 -0
  37. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/data_streamer.py +0 -0
  38. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/__init__.py +0 -0
  39. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/__init__.py +0 -0
  40. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/llama_base.py +0 -0
  41. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/llama_event_decoder.py +0 -0
  42. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/llama_header_decoder.py +0 -0
  43. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/llama_streamer.py +0 -0
  44. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/logging.py +0 -0
  45. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/__init__.py +0 -0
  46. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_base.py +0 -0
  47. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_digitizers.py +0 -0
  48. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_flashcam.py +0 -0
  49. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_header.py +0 -0
  50. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_header_decoder.py +0 -0
  51. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_packet.py +0 -0
  52. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_run_decoder.py +0 -0
  53. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/skim_orca_file.py +0 -0
  54. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/raw_buffer.py +0 -0
  55. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/daq2lh5/utils.py +0 -0
  56. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/SOURCES.txt +0 -0
  57. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/dependency_links.txt +0 -0
  58. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/entry_points.txt +0 -0
  59. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/not-zip-safe +0 -0
  60. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/top_level.txt +0 -0
  61. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_buffer_processor.py +0 -0
  62. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_buffer_processor_configs/buffer_processor_config.json +0 -0
  63. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_buffer_processor_configs/lh5_buffer_processor_config.json +0 -0
  64. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_buffer_processor_configs/raw_out_spec_no_proc.json +0 -0
  65. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_lh5_buffer_processor.py +0 -0
  66. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/compass/test_compass_event_decoder.py +0 -0
  67. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/compass/test_compass_streamer.py +0 -0
  68. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/configs/fc-out-spec.json +0 -0
  69. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/configs/orca-out-spec-cli.json +0 -0
  70. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/configs/orca-out-spec.json +0 -0
  71. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/conftest.py +0 -0
  72. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/fc/conftest.py +0 -0
  73. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/fc/test_fc_config_decoder.py +0 -0
  74. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/fc/test_fc_event_decoder.py +0 -0
  75. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/fc/test_fc_status_decoder.py +0 -0
  76. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/llama/conftest.py +0 -0
  77. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/llama/test_llama_event_decoder.py +0 -0
  78. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/llama/test_llama_header_decoder.py +0 -0
  79. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/llama/test_llama_streamer.py +0 -0
  80. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/orca/conftest.py +0 -0
  81. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/orca/test_or_run_decoder_for_run.py +0 -0
  82. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/orca/test_orca_fc.py +0 -0
  83. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/orca/test_orca_packet.py +0 -0
  84. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/test_build_raw.py +0 -0
  85. {legend_daq2lh5-1.6.1 → legend_daq2lh5-1.6.3}/tests/test_raw_buffer.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: legend-daq2lh5
-Version: 1.6.1
+Version: 1.6.3
 Summary: Convert digitizer data to LH5
 Author-email: Jason Detwiler <jasondet@uw.edu>
 Maintainer: The LEGEND collaboration
@@ -23,7 +23,7 @@ Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: dspeed>=1.3
-Requires-Dist: fcio>=0.7.8
+Requires-Dist: fcio>=0.7.9
 Requires-Dist: h5py>=3.2.0
 Requires-Dist: hdf5plugin
 Requires-Dist: legend-pydataobj>=1.6
@@ -32,7 +32,7 @@ classifiers = [
 requires-python = ">=3.9"
 dependencies = [
     "dspeed>=1.3",
-    "fcio>=0.7.8",
+    "fcio>=0.7.9",
     "h5py>=3.2.0",
     "hdf5plugin",
     "legend-pydataobj>=1.6",
@@ -17,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '1.6.1'
-__version_tuple__ = version_tuple = (1, 6, 1)
+__version__ = version = '1.6.3'
+__version_tuple__ = version_tuple = (1, 6, 3)
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import argparse
+import json
 import os
 import sys
 
@@ -78,6 +79,13 @@ def daq2lh5_cli():
     parser.add_argument(
         "--overwrite", "-w", action="store_true", help="""Overwrite output files"""
     )
+    parser.add_argument(
+        "--kwargs",
+        "-k",
+        type=str,
+        default="{}",
+        help="""Any additional kwargs to pass to build_raw, will be parsed as a JSON string""",
+    )
 
     args = parser.parse_args()
 
@@ -92,6 +100,9 @@ def daq2lh5_cli():
         print(__version__)  # noqa: T201
         sys.exit()
 
+    if args.kwargs:
+        kwargs = json.loads(args.kwargs)
+
     for stream in args.in_stream:
         basename = os.path.splitext(os.path.basename(stream))[0]
         build_raw(
@@ -102,4 +113,5 @@ def daq2lh5_cli():
             n_max=args.max_rows,
             overwrite=args.overwrite,
             orig_basename=basename,
+            **kwargs,
         )
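Note on the new `--kwargs` flag: extra `build_raw` options can now be passed on the command line as a JSON string. A minimal sketch of the round trip, based on the CLI wiring in the diff above (`buffer_size` is only a hypothetical example of a forwarded keyword, not something this diff adds):

    import json

    # equivalent of: daq2lh5 --kwargs '{"buffer_size": 8192}' <in_stream>
    kwargs = json.loads('{"buffer_size": 8192}')
    print(kwargs)  # {'buffer_size': 8192}
    # the CLI then calls build_raw(stream, ..., **kwargs)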
@@ -25,8 +25,10 @@ compass_decoded_values = {
     "channel": {"dtype": "uint32"},
     # Timestamp of event
     "timestamp": {"dtype": "float64", "units": "ps"},
-    # Energy of event
+    # Energy of event in channels
     "energy": {"dtype": "uint32"},
+    # Energy of event, calibrated
+    "energy_calibrated": {"dtype": "float64"},
     # Energy short of event
     "energy_short": {"dtype": "uint32"},
     # Flags that the digitizer raised
@@ -153,50 +155,56 @@ class CompassEventDecoder(DataDecoder):
         # the time stamp also does not care about if we have an energy short present
         tbl["timestamp"].nda[ii] = np.frombuffer(packet[4:12], dtype=np.uint64)[0]
 
-        # get the rest of the values depending on if there is an energy_short present
-        if int(header["energy_short"].value) == 1:  # again, the header is a struct
+        # stumble our way through the energy, depending on what the header says
+        bytes_read = 12
+        if int(header["energy_channels"].value) == 1:
             tbl["energy"].nda[ii] = np.frombuffer(packet[12:14], dtype=np.uint16)[0]
-            tbl["energy_short"].nda[ii] = np.frombuffer(packet[14:16], dtype=np.uint16)[
-                0
-            ]
-            tbl["flags"].nda[ii] = np.frombuffer(packet[16:20], np.uint32)[0]
-            tbl["num_samples"].nda[ii] = np.frombuffer(packet[21:25], dtype=np.uint32)[
-                0
-            ]
-
-            if (
-                tbl["num_samples"].nda[ii]
-                != self.decoded_values[bc]["waveform"]["wf_len"]
-            ):  # make sure that the waveform we read in is the same length as in the config
-                raise RuntimeError(
-                    f"Waveform size {tbl['num_samples'].nda[ii]} doesn't match expected size {self.decoded_values[bc]['waveform']['wf_len']}. "
-                    "Skipping packet"
-                )
-
-            tbl["waveform"]["values"].nda[ii] = np.frombuffer(
-                packet[25:], dtype=np.uint16
-            )
-
+            bytes_read += 2
+            if int(header["energy_calibrated"].value) == 1:
+                tbl["energy_calibrated"].nda[ii] = None
+        elif (int(header["energy_calibrated"].value) == 1) and (
+            int(header["energy_channels"].value) == 0
+        ):
+            tbl["energy_calibrated"].nda[ii] = np.frombuffer(
+                packet[14:22], dtype=np.float64
+            )[0]
+            bytes_read += 8
+            tbl["energy"].nda[ii] = None
         else:
-            tbl["energy"].nda[ii] = np.frombuffer(packet[12:14], dtype=np.uint16)[0]
-            tbl["energy_short"].nda[ii] = None
-            tbl["flags"].nda[ii] = np.frombuffer(packet[14:18], np.uint32)[0]
-            tbl["num_samples"].nda[ii] = np.frombuffer(packet[19:23], dtype=np.uint32)[
-                0
-            ]
-
-            if (
-                tbl["num_samples"].nda[ii]
-                != self.decoded_values[bc]["waveform"]["wf_len"]
-            ):  # make sure that the waveform we read in is the same length as in the config
-                raise RuntimeError(
-                    f"Waveform size {tbl['num_samples'].nda[ii]} doesn't match expected size {self.decoded_values[bc]['waveform']['wf_len']}. "
-                    "Skipping packet"
-                )
-
-            tbl["waveform"]["values"].nda[ii] = np.frombuffer(
-                packet[23:], dtype=np.uint16
+            tbl["energy_calibrated"].nda[ii] = np.frombuffer(
+                packet[12:20], dtype=np.float64
+            )[0]
+            bytes_read += 8
+
+        # now handle the energy short
+        if int(header["energy_short"].value) == 1:
+            tbl["energy_short"].nda[ii] = np.frombuffer(
+                packet[bytes_read : bytes_read + 2], dtype=np.uint16
+            )[0]
+            bytes_read += 2
+        else:
+            tbl["energy_short"].nda[ii] = 0
+
+        tbl["flags"].nda[ii] = np.frombuffer(
+            packet[bytes_read : bytes_read + 4], np.uint32
+        )[0]
+        bytes_read += 5  # skip over the waveform code
+        tbl["num_samples"].nda[ii] = np.frombuffer(
+            packet[bytes_read : bytes_read + 4], dtype=np.uint32
+        )[0]
+        bytes_read += 4
+
+        if (
+            tbl["num_samples"].nda[ii] != self.decoded_values[bc]["waveform"]["wf_len"]
+        ):  # make sure that the waveform we read in is the same length as in the config
+            raise RuntimeError(
+                f"Waveform size {tbl['num_samples'].nda[ii]} doesn't match expected size {self.decoded_values[bc]['waveform']['wf_len']}. "
+                "Skipping packet"
             )
 
+        tbl["waveform"]["values"].nda[ii] = np.frombuffer(
+            packet[bytes_read:], dtype=np.uint16
+        )
+
         evt_rbkd[bc].loc += 1
         return evt_rbkd[bc].is_full()
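Note: the rewritten decoder advances a `bytes_read` cursor through each packet instead of hard-coding the old 23/25-byte offsets. A standalone sketch of the metadata layout this implies (field widths taken from the diff; the helper name is ours, not part of the package):

    def packet_metadata_length(energy_channels: bool, energy_calibrated: bool,
                               energy_short: bool) -> int:
        n = 12          # 2-byte board, 2-byte channel, 8-byte timestamp
        if energy_channels:
            n += 2      # uint16 energy in ADC channels
        if energy_calibrated:
            n += 8      # float64 calibrated energy
        if energy_short:
            n += 2      # uint16 energy short
        n += 4 + 1 + 4  # uint32 flags, 1-byte waveform code, uint32 num_samples
        return n        # 2 * num_samples waveform bytes follow

    print(packet_metadata_length(True, False, False))  # 23, the old fixed size
    print(packet_metadata_length(True, False, True))   # 25, the old energy-short size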
@@ -0,0 +1,247 @@
+from __future__ import annotations
+
+import logging
+
+import lgdo
+import numpy as np
+
+from ..data_decoder import DataDecoder
+from .compass_config_parser import compass_config_to_struct
+
+log = logging.getLogger(__name__)
+
+
+class CompassHeaderDecoder(DataDecoder):
+    """
+    Decode CoMPASS header data. Also, read in CoMPASS config data if provided using the compass_config_parser
+    """
+
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, **kwargs)
+        self.config = None  # initialize to none, because compass_config_to_struct always returns a struct
+
+    def decode_header(self, in_stream: bytes, config_file: str = None) -> dict:
+        """Decode the CoMPASS file header, and add CoMPASS config data to the header, if present.
+
+        Parameters
+        ----------
+        in_stream
+            The stream of data to have its header decoded
+        config_file
+            The config file for the CoMPASS data, if present
+
+        Returns
+        -------
+        config
+            A dict containing the header information, as well as the important config information
+            of wf_len and num_enabled_channels
+        """
+        wf_len = None
+        config_names = [
+            "energy_channels",  # energy is given in channels (0: false, 1: true)
+            "energy_calibrated",  # energy is given in keV/MeV, according to the calibration (0: false, 1: true)
+            "energy_short",  # energy short is present (0: false, 1: true)
+            "waveform_samples",  # waveform samples are present (0: false, 1: true)
+            "header_present",  # there is a 2 byte header present in the file (0: false, 1: true)
+        ]  # need to determine which of these are present in a file
+
+        # First need to check if the first two bytes are of the form 0xCAEx
+        # CoMPASS specs say that every file should start with this header, but if CoMPASS writes size-limited files, then this header may not be present in *all* files...
+        header_in_bytes = in_stream.read(2)
+
+        if header_in_bytes[-1] == int.from_bytes(b"\xca", byteorder="big"):
+            log.debug("header is present in file.")
+            header_in_binary = bin(int.from_bytes(header_in_bytes, byteorder="little"))
+            header_as_list = str(header_in_binary)[
+                ::-1
+            ]  # reverse it as we care about bit 0, bit 1, etc.
+            header_dict = dict(
+                {
+                    "energy_channels": int(header_as_list[0]) == 1,
+                    "energy_calibrated": int(header_as_list[1]) == 1,
+                    "energy_channels_calibrated": int(header_as_list[0])
+                    == 1 & int(header_as_list[1])
+                    == 1,
+                    "energy_short": int(header_as_list[2]) == 1,
+                    "waveform_samples": int(header_as_list[3]) == 1,
+                    "header_present": True,
+                }
+            )
+
+            # if we don't have the wf_len, get it now
+
+            if config_file is None:
+                if header_dict["waveform_samples"] == 0:
+                    wf_len = 0
+                else:
+                    wf_byte_len = 4
+                    bytes_to_read = (
+                        12  # covers 2-byte board, 2-byte channel, 8-byte time stamp
+                    )
+                    bytes_to_read += (
+                        2 * header_dict["energy_channels"]
+                        + 8 * header_dict["energy_calibrated"]
+                        + 2 * header_dict["energy_short"]
+                    )
+                    bytes_to_read += 4 + 1  # 4-byte flags, 1-byte waveform code
+                    first_bytes = in_stream.read(bytes_to_read + wf_byte_len)
+
+                    wf_len = np.frombuffer(
+                        first_bytes[bytes_to_read : bytes_to_read + wf_byte_len],
+                        dtype=np.uint32,
+                    )[0]
+
+        # if header is not present, we need to play some tricks
+        # either energy short is present or not
+        # and one of three options for energy (ADC, calibrated, both)
+        else:
+            # If the 2 byte header is not present, then we have read in the board by accident
+            header_in_bytes += in_stream.read(
+                10
+            )  # read in the 2-byte ch and 8-byte timestamp
+            bytes_read = 12
+            fixed_header_start_len = (
+                12  # always 12 bytes: 2-byte board, 2-byte channel, 8-byte timestamp
+            )
+            possible_energy_header_byte_lengths = [
+                2,
+                8,
+                10,
+            ]  # either ADC, Calibrated, or both
+            possible_energy_short_header_byte_lengths = [
+                0,
+                2,
+            ]  # energy short is present or not
+            fixed_header_part = 5  # 5 bytes from flags + code
+            wf_len_bytes = 4  # wf_len is 4 bytes long
+
+            for prefix in possible_energy_header_byte_lengths:
+                terminate = False
+                for suffix in possible_energy_short_header_byte_lengths:
+
+                    # ---- first packet -------
+                    # don't read more than we have to, check how many more bytes we need to read in
+                    difference = (
+                        fixed_header_start_len
+                        + prefix
+                        + suffix
+                        + fixed_header_part
+                        + wf_len_bytes
+                        - bytes_read
+                    )
+                    if difference > 0:
+                        # just read a bit more data
+                        header_in_bytes += in_stream.read(difference)
+                        bytes_read += difference
+
+                    wf_len_guess = np.frombuffer(
+                        header_in_bytes[
+                            fixed_header_start_len
+                            + prefix
+                            + suffix
+                            + fixed_header_part : fixed_header_start_len
+                            + prefix
+                            + suffix
+                            + fixed_header_part
+                            + wf_len_bytes
+                        ],
+                        dtype=np.uint32,
+                    )[0]
+
+                    # read in the first waveform data
+                    difference = (
+                        fixed_header_start_len
+                        + prefix
+                        + suffix
+                        + fixed_header_part
+                        + wf_len_bytes
+                        + 2 * wf_len_guess
+                        - bytes_read
+                    )
+                    if difference > 0:
+                        header_in_bytes += in_stream.read(2 * wf_len_guess)
+                        bytes_read += 2 * wf_len_guess
+
+                    # ------ second packet header ----------
+                    difference = (
+                        2
+                        * (
+                            fixed_header_start_len
+                            + prefix
+                            + suffix
+                            + fixed_header_part
+                            + wf_len_bytes
+                        )
+                        + 2 * wf_len_guess
+                        - bytes_read
+                    )
+                    if difference > 0:
+                        header_in_bytes += in_stream.read(difference)
+                        bytes_read += (
+                            fixed_header_start_len
+                            + prefix
+                            + suffix
+                            + fixed_header_part
+                            + wf_len_bytes
+                        )
+                    wf_len_guess_2 = np.frombuffer(
+                        header_in_bytes[
+                            2
+                            * (
+                                fixed_header_start_len
+                                + prefix
+                                + suffix
+                                + fixed_header_part
+                            )
+                            + wf_len_bytes
+                            + 2
+                            * wf_len_guess : 2
+                            * (
+                                fixed_header_start_len
+                                + prefix
+                                + suffix
+                                + fixed_header_part
+                                + wf_len_bytes
+                            )
+                            + 2 * wf_len_guess
+                        ],
+                        dtype=np.uint32,
+                    )[0]
+
+                    # if the waveform lengths agree, then we can stride packets correctly
+                    if wf_len_guess_2 == wf_len_guess:
+                        header_dict = dict(
+                            {
+                                "energy_channels": prefix == 2,
+                                "energy_calibrated": prefix == 8,
+                                "energy_channels_calibrated": prefix == 10,
+                                "energy_short": suffix == 2,
+                                "waveform_samples": wf_len != 0,
+                                "header_present": False,
+                            }
+                        )
+                        wf_len = wf_len_guess
+                        terminate = True
+                        break
+                if terminate:
+                    break
+
+        self.config = compass_config_to_struct(config_file, wf_len)
+
+        for name in config_names:
+            if name in self.config:
+                log.warning(f"{name} already in self.config. skipping...")
+                continue
+            value = int(header_dict[name])
+            self.config.add_field(
+                str(name), lgdo.Scalar(value)
+            )  # self.config is a struct
+
+        return self.config
+
+    def make_lgdo(self, key: int = None, size: int = None) -> lgdo.Struct:
+        if self.config is None:
+            raise RuntimeError(
+                "self.config still None, need to decode header before calling make_lgdo"
+            )
+        return self.config  # self.config is already an lgdo, namely it is a struct
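Note: the 0xCAEx check above keys on the second byte of the little-endian header word, and the flag bits are then read from a reversed binary string. A standalone illustration with a made-up header value of 0xCA07 (bits 0-2 set, bit 3 clear):

    header_in_bytes = b"\x07\xca"  # hypothetical 0xCA07 header, stored little-endian
    assert header_in_bytes[-1] == int.from_bytes(b"\xca", byteorder="big")  # 0xCA
    bits = bin(int.from_bytes(header_in_bytes, byteorder="little"))[::-1]
    # the reversed string puts bit 0 first: energy in channels, energy calibrated,
    # energy short, waveform samples
    print(bits[0], bits[1], bits[2], bits[3])  # 1 1 1 0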
@@ -72,22 +72,6 @@ class CompassStreamer(DataStreamer):
         So, we must read this header once, and then proceed to read packets in.
         """
         # If a config file is not present, the wf_len can be determined by opening the first few bytes of the in_stream
-        wf_len = None
-        if self.compass_config_file is None:
-            self.set_in_stream(stream_name)
-
-            first_bytes = self.in_stream.read(27)
-
-            energy_short = str(
-                bin(int.from_bytes(first_bytes[:2], byteorder="little"))
-            )[::-1][2]
-
-            if int(energy_short) == 1:
-                [wf_len] = np.frombuffer(first_bytes[23:27], dtype=np.uint32)
-            else:
-                [wf_len] = np.frombuffer(first_bytes[21:25], dtype=np.uint32)
-
-            self.close_stream()
 
         # set the in_stream
         self.set_in_stream(stream_name)
@@ -95,11 +79,20 @@
 
         # read in and decode the file header info, passing the compass_config_file, if present
         self.header = self.header_decoder.decode_header(
-            self.in_stream, self.compass_config_file, wf_len
+            self.in_stream, self.compass_config_file
         )  # returns an lgdo.Struct
-        self.n_bytes_read += (
-            2  # there are 2 bytes in the header, for a 16 bit number to read out
-        )
+        self.close_stream()
+
+        # Now we are ready to read the data
+        self.set_in_stream(stream_name)
+        self.n_bytes_read = 0
+
+        if int(self.header["header_present"].value) == 1:
+            # read 2 bytes if we need to
+            self.in_stream.read(2)
+            self.n_bytes_read += (
+                2  # there are 2 bytes in the header, for a 16 bit number to read out
+            )
 
         # set up data loop variables
         self.packet_id = 0
@@ -171,16 +164,34 @@
         if self.in_stream is None:
             raise RuntimeError("self.in_stream is None")
 
-        if (self.packet_id == 0) and (self.n_bytes_read != 2):
+        if (
+            (self.packet_id == 0)
+            and (self.n_bytes_read != 2)
+            and (int(self.header["header_present"].value) == 1)
+        ):
             raise RuntimeError(
                 f"The 2 byte filer header was not converted, instead read in {self.n_bytes_read} for the file header"
             )
+        if (
+            (self.packet_id == 0)
+            and (self.n_bytes_read != 0)
+            and (int(self.header["header_present"].value) == 0)
+        ):
+            raise RuntimeError(
+                f"The header was not converted, instead read in {self.n_bytes_read} for the file header"
+            )
 
-        # packets have metadata of variable lengths, depending on if the header shows that energy_short is present in the metadata
+        # packets have metadata of variable lengths, depending on what the header shows
+        header_length = 12  # header always starts with 2-bytes of board, 2-bytes of channel, and 8-bytes of timestamp
+        if int(self.header["energy_channels"].value) == 1:
+            header_length += 2  # if the energy is recorded in ADC channels, then there are an extra 2 bytes in the metadata
+        if int(self.header["energy_calibrated"].value) == 1:
+            header_length += 8  # if the energy is recorded in keV/MeV, then there are an extra 8 bytes in the metadata
         if int(self.header["energy_short"].value) == 1:
-            header_length = 25  # if the energy short is present, then there are an extra 2 bytes in the metadata
-        else:
-            header_length = 23  # the normal packet metadata is 23 bytes long
+            header_length += 2  # if the energy short is present, then there are an extra 2 bytes in the metadata
+        header_length += (
+            4 + 1 + 4
+        )  # the flags, the waveform code bytes, and the waveform length
 
         # read the packet's metadata into the buffer
         pkt_hdr = self.buffer[:header_length]
@@ -190,16 +201,11 @@
         # return None once we run out of file
         if n_bytes_read == 0:
             return None
-        if (n_bytes_read != 25) and (n_bytes_read != 23):
+        if n_bytes_read not in [23, 25, 29, 31, 33]:
             raise RuntimeError(f"only got {n_bytes_read} bytes for packet header")
 
-        # get the waveform length so we can read in the rest of the packet
-        if n_bytes_read == 25:
-            [num_samples] = np.frombuffer(pkt_hdr[21:25], dtype=np.uint32)
-            pkt_length = header_length + 2 * num_samples
-        if n_bytes_read == 23:
-            [num_samples] = np.frombuffer(pkt_hdr[19:23], dtype=np.uint32)
-            pkt_length = header_length + 2 * num_samples
+        [num_samples] = np.frombuffer(pkt_hdr[-4:], dtype=np.uint32)
+        pkt_length = header_length + 2 * num_samples
 
         # load into buffer, resizing as necessary
         if len(self.buffer) < pkt_length:
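Note: the accepted packet-header sizes [23, 25, 29, 31, 33] follow directly from the layout above: 12 fixed bytes, an energy field of 2, 8, or 10 bytes, an optional 2-byte energy short, and 4 + 1 + 4 trailing bytes. A quick standalone check:

    lengths = sorted({12 + energy + short + 9 for energy in (2, 8, 10) for short in (0, 2)})
    print(lengths)  # [23, 25, 29, 31, 33]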
@@ -66,6 +66,7 @@ class FCConfigDecoder(DataDecoder):
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(*args, **kwargs)
         self.decoded_values = copy.deepcopy(fc_config_decoded_values)
+        self.key_list = []
 
     def decode_packet(
         self,
@@ -126,7 +127,12 @@
         ntraces = fcio.config.adcs + fcio.config.triggers
         tbl.add_field("tracemap", lgdo.Array(fcio.config.tracemap[:ntraces]))
 
+        self.key_list.append(f"fcid_{fcio.config.streamid & 0xFFFF}/config")
+
         return tbl
 
+    def get_key_lists(self) -> list[list[int | str]]:
+        return [copy.deepcopy(self.key_list)]
+
     def get_decoded_values(self, key: int | str = None) -> dict[str, dict[str, Any]]:
         return self.decoded_values
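Note: config buffers (and, below, event-header buffers) are now keyed by strings of the form `fcid_<n>/config` rather than by integer keys, where the mask keeps only the lower 16 bits of the stream id. For illustration (the stream id value is hypothetical):

    streamid = 0x10001  # hypothetical id with a bit set above the low 16
    print(f"fcid_{streamid & 0xFFFF}/config")  # fcid_1/config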
@@ -152,24 +152,41 @@ class FCEventDecoder(DataDecoder):
         tbl["board_id"].nda[loc] = fcio.event.card_address[ii]
         tbl["fc_input"].nda[loc] = fcio.event.card_channel[ii]
         tbl["event_type"].nda[loc] = fcio.event.type
-        tbl["eventnumber"].nda[loc] = fcio.event.timestamp[0]
         tbl["numtraces"].nda[loc] = fcio.event.num_traces
-        tbl["ts_pps"].nda[loc] = fcio.event.timestamp[1]
-        tbl["ts_ticks"].nda[loc] = fcio.event.timestamp[2]
-        tbl["ts_maxticks"].nda[loc] = fcio.event.timestamp[3]
-        tbl["mu_offset_sec"].nda[loc] = fcio.event.timeoffset[0]
-        tbl["mu_offset_usec"].nda[loc] = fcio.event.timeoffset[1]
-        tbl["to_master_sec"].nda[loc] = fcio.event.timeoffset[2]
-        tbl["delta_mu_usec"].nda[loc] = fcio.event.timeoffset[3]
-        tbl["abs_delta_mu_usec"].nda[loc] = fcio.event.timeoffset[4]
-        tbl["to_start_sec"].nda[loc] = fcio.event.timeoffset[5]
-        tbl["to_start_usec"].nda[loc] = fcio.event.timeoffset[6]
-        tbl["dr_start_pps"].nda[loc] = fcio.event.deadregion[0]
-        tbl["dr_start_ticks"].nda[loc] = fcio.event.deadregion[1]
-        tbl["dr_stop_pps"].nda[loc] = fcio.event.deadregion[2]
-        tbl["dr_stop_ticks"].nda[loc] = fcio.event.deadregion[3]
-        tbl["dr_maxticks"].nda[loc] = fcio.event.deadregion[4]
-        # if event_type == 11: provides the same check
+
+        # the order of names is crucial here!
+        timestamp_names = [
+            "eventnumber",
+            "ts_pps",
+            "ts_ticks",
+            "ts_maxticks",
+        ]
+        for name, value in zip(timestamp_names, fcio.event.timestamp):
+            tbl[name].nda[loc] = value
+
+        timeoffset_names = [
+            "mu_offset_sec",
+            "mu_offset_usec",
+            "to_master_sec",
+            "delta_mu_usec",
+            "abs_delta_mu_usec",
+            "to_start_sec",
+            "to_start_usec",
+        ]
+        for name, value in zip(timeoffset_names, fcio.event.timeoffset):
+            tbl[name].nda[loc] = value
+
+        deadregion_names = [
+            "dr_start_pps",
+            "dr_start_ticks",
+            "dr_stop_pps",
+            "dr_stop_ticks",
+            "dr_maxticks",
+        ]
+        for name, value in zip(deadregion_names, fcio.event.deadregion[:5]):
+            tbl[name].nda[loc] = value
+
+        # if event_type == 11: would provide the same check
         # however, the usage of deadregion[5]/[6] must never change
         # and it will always be present if deadregion[7..] is ever used
         if fcio.event.deadregion_size >= 7:
@@ -179,7 +196,7 @@ class FCEventDecoder(DataDecoder):
             tbl["dr_ch_idx"].nda[loc] = 0
             tbl["dr_ch_len"].nda[loc] = fcio.config.adcs
 
-        # The following values are calculated values by fcio-py
+        # The following values are calculated by fcio-py, derived from fields above.
         tbl["timestamp"].nda[loc] = fcio.event.unix_time_utc_sec
         tbl["deadinterval_nsec"].nda[loc] = fcio.event.dead_interval_nsec[ii]
         tbl["deadtime"].nda[loc] = fcio.event.dead_time_sec[ii]
@@ -14,9 +14,12 @@ log = logging.getLogger(__name__)
 
 def get_key(streamid: int, card_address: int, card_input: int, iwf: int = -1) -> int:
     if streamid > 0 or iwf < 0:
-        # For backwards compatibility only the lower 16-bit of the streamid are
-        # used.
-        return (streamid & 0xFFFF) * 1000000 + card_address * 100 + card_input
+        # For backwards compatibility only the lower 16-bit of the streamid are used.
+        return (
+            (int(streamid) & 0xFFFF) * 1000000
+            + int(card_address) * 100
+            + int(card_input)
+        )
     else:
         return iwf
 
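Note: get_key now casts each field to a plain Python int before combining them, presumably so the arithmetic cannot overflow when the inputs are fixed-width numpy integers. The decimal packing itself is unchanged:

    streamid, card_address, card_input = 1, 11, 3
    key = (int(streamid) & 0xFFFF) * 1000000 + int(card_address) * 100 + int(card_input)
    print(key)  # 1001103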
@@ -212,7 +215,7 @@
         self.decoded_values["baseline"]["length_guess"] = n_traces
         self.decoded_values["daqenergy"]["length_guess"] = n_traces
 
-        self.key_list = [get_key(fcio_stream.config.streamid, 0, 0)]
+        self.key_list = [f"fcid_{fcio_stream.config.streamid & 0xFFFF}/evt_hdr"]
 
     def get_key_lists(self) -> list[list[int | str]]:
         return [copy.deepcopy(self.key_list)]
@@ -232,7 +235,7 @@
     ) -> bool:
 
         # only one key available: this streamid
-        key = get_key(fcio.config.streamid, 0, 0)
+        key = f"fcid_{fcio.config.streamid & 0xFFFF}/evt_hdr"
         if key not in evt_hdr_rbkd:
             return False