legend-daq2lh5 1.6.2__tar.gz → 1.6.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84) hide show
  1. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/PKG-INFO +1 -1
  2. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/_version.py +2 -2
  3. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/compass/compass_event_decoder.py +50 -42
  4. legend_daq2lh5-1.6.3/src/daq2lh5/compass/compass_header_decoder.py +247 -0
  5. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/compass/compass_streamer.py +39 -33
  6. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_event_decoder.py +35 -18
  7. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_eventheader_decoder.py +6 -3
  8. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_streamer.py +16 -7
  9. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/PKG-INFO +1 -1
  10. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/compass/conftest.py +1 -2
  11. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/compass/test_compass_header_decoder.py +2 -0
  12. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/orca/test_orca_fcio.py +1 -1
  13. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/orca/test_orca_to_raw.py +69 -0
  14. legend_daq2lh5-1.6.2/src/daq2lh5/compass/compass_header_decoder.py +0 -74
  15. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/LICENSE +0 -0
  16. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/README.md +0 -0
  17. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/pyproject.toml +0 -0
  18. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/setup.cfg +0 -0
  19. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/__init__.py +0 -0
  20. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/buffer_processor/__init__.py +0 -0
  21. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/buffer_processor/buffer_processor.py +0 -0
  22. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/buffer_processor/lh5_buffer_processor.py +0 -0
  23. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/build_raw.py +0 -0
  24. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/cli.py +0 -0
  25. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/compass/__init__.py +0 -0
  26. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/compass/compass_config_parser.py +0 -0
  27. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/data_decoder.py +0 -0
  28. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/data_streamer.py +0 -0
  29. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/__init__.py +0 -0
  30. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_config_decoder.py +0 -0
  31. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_fsp_decoder.py +0 -0
  32. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_status_decoder.py +0 -0
  33. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/fc/fc_streamer.py +0 -0
  34. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/__init__.py +0 -0
  35. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/llama_base.py +0 -0
  36. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/llama_event_decoder.py +0 -0
  37. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/llama_header_decoder.py +0 -0
  38. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/llama/llama_streamer.py +0 -0
  39. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/logging.py +0 -0
  40. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/__init__.py +0 -0
  41. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_base.py +0 -0
  42. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_digitizers.py +0 -0
  43. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_fcio.py +0 -0
  44. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_flashcam.py +0 -0
  45. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_header.py +0 -0
  46. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_header_decoder.py +0 -0
  47. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_packet.py +0 -0
  48. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/orca_run_decoder.py +0 -0
  49. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/orca/skim_orca_file.py +0 -0
  50. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/raw_buffer.py +0 -0
  51. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/daq2lh5/utils.py +0 -0
  52. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/SOURCES.txt +0 -0
  53. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/dependency_links.txt +0 -0
  54. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/entry_points.txt +0 -0
  55. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/not-zip-safe +0 -0
  56. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/requires.txt +0 -0
  57. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/src/legend_daq2lh5.egg-info/top_level.txt +0 -0
  58. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_buffer_processor.py +0 -0
  59. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_buffer_processor_configs/buffer_processor_config.json +0 -0
  60. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_buffer_processor_configs/lh5_buffer_processor_config.json +0 -0
  61. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_buffer_processor_configs/raw_out_spec_no_proc.json +0 -0
  62. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/buffer_processor/test_lh5_buffer_processor.py +0 -0
  63. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/compass/test_compass_event_decoder.py +0 -0
  64. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/compass/test_compass_streamer.py +0 -0
  65. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/configs/fc-out-spec.json +0 -0
  66. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/configs/orca-out-spec-cli.json +0 -0
  67. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/configs/orca-out-spec.json +0 -0
  68. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/conftest.py +0 -0
  69. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/fc/conftest.py +0 -0
  70. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/fc/test_fc_config_decoder.py +0 -0
  71. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/fc/test_fc_event_decoder.py +0 -0
  72. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/fc/test_fc_status_decoder.py +0 -0
  73. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/fc/test_fc_streamer.py +0 -0
  74. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/llama/conftest.py +0 -0
  75. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/llama/test_llama_event_decoder.py +0 -0
  76. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/llama/test_llama_header_decoder.py +0 -0
  77. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/llama/test_llama_streamer.py +0 -0
  78. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/orca/conftest.py +0 -0
  79. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/orca/test_or_run_decoder_for_run.py +0 -0
  80. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/orca/test_orca_fc.py +0 -0
  81. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/orca/test_orca_packet.py +0 -0
  82. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/test_build_raw.py +0 -0
  83. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/test_cli.py +0 -0
  84. {legend_daq2lh5-1.6.2 → legend_daq2lh5-1.6.3}/tests/test_raw_buffer.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: legend-daq2lh5
3
- Version: 1.6.2
3
+ Version: 1.6.3
4
4
  Summary: Convert digitizer data to LH5
5
5
  Author-email: Jason Detwiler <jasondet@uw.edu>
6
6
  Maintainer: The LEGEND collaboration
@@ -17,5 +17,5 @@ __version__: str
17
17
  __version_tuple__: VERSION_TUPLE
18
18
  version_tuple: VERSION_TUPLE
19
19
 
20
- __version__ = version = '1.6.2'
21
- __version_tuple__ = version_tuple = (1, 6, 2)
20
+ __version__ = version = '1.6.3'
21
+ __version_tuple__ = version_tuple = (1, 6, 3)
@@ -25,8 +25,10 @@ compass_decoded_values = {
25
25
  "channel": {"dtype": "uint32"},
26
26
  # Timestamp of event
27
27
  "timestamp": {"dtype": "float64", "units": "ps"},
28
- # Energy of event
28
+ # Energy of event in channels
29
29
  "energy": {"dtype": "uint32"},
30
+ # Energy of event, calibrated
31
+ "energy_calibrated": {"dtype": "float64"},
30
32
  # Energy short of event
31
33
  "energy_short": {"dtype": "uint32"},
32
34
  # Flags that the digitizer raised
@@ -153,50 +155,56 @@ class CompassEventDecoder(DataDecoder):
153
155
  # the time stamp also does not care about if we have an energy short present
154
156
  tbl["timestamp"].nda[ii] = np.frombuffer(packet[4:12], dtype=np.uint64)[0]
155
157
 
156
- # get the rest of the values depending on if there is an energy_short present
157
- if int(header["energy_short"].value) == 1: # again, the header is a struct
158
+ # stumble our way through the energy, depending on what the header says
159
+ bytes_read = 12
160
+ if int(header["energy_channels"].value) == 1:
158
161
  tbl["energy"].nda[ii] = np.frombuffer(packet[12:14], dtype=np.uint16)[0]
159
- tbl["energy_short"].nda[ii] = np.frombuffer(packet[14:16], dtype=np.uint16)[
160
- 0
161
- ]
162
- tbl["flags"].nda[ii] = np.frombuffer(packet[16:20], np.uint32)[0]
163
- tbl["num_samples"].nda[ii] = np.frombuffer(packet[21:25], dtype=np.uint32)[
164
- 0
165
- ]
166
-
167
- if (
168
- tbl["num_samples"].nda[ii]
169
- != self.decoded_values[bc]["waveform"]["wf_len"]
170
- ): # make sure that the waveform we read in is the same length as in the config
171
- raise RuntimeError(
172
- f"Waveform size {tbl['num_samples'].nda[ii]} doesn't match expected size {self.decoded_values[bc]['waveform']['wf_len']}. "
173
- "Skipping packet"
174
- )
175
-
176
- tbl["waveform"]["values"].nda[ii] = np.frombuffer(
177
- packet[25:], dtype=np.uint16
178
- )
179
-
162
+ bytes_read += 2
163
+ if int(header["energy_calibrated"].value) == 1:
164
+ tbl["energy_calibrated"].nda[ii] = None
165
+ elif (int(header["energy_calibrated"].value) == 1) and (
166
+ int(header["energy_channels"].value) == 0
167
+ ):
168
+ tbl["energy_calibrated"].nda[ii] = np.frombuffer(
169
+ packet[14:22], dtype=np.float64
170
+ )[0]
171
+ bytes_read += 8
172
+ tbl["energy"].nda[ii] = None
180
173
  else:
181
- tbl["energy"].nda[ii] = np.frombuffer(packet[12:14], dtype=np.uint16)[0]
182
- tbl["energy_short"].nda[ii] = None
183
- tbl["flags"].nda[ii] = np.frombuffer(packet[14:18], np.uint32)[0]
184
- tbl["num_samples"].nda[ii] = np.frombuffer(packet[19:23], dtype=np.uint32)[
185
- 0
186
- ]
187
-
188
- if (
189
- tbl["num_samples"].nda[ii]
190
- != self.decoded_values[bc]["waveform"]["wf_len"]
191
- ): # make sure that the waveform we read in is the same length as in the config
192
- raise RuntimeError(
193
- f"Waveform size {tbl['num_samples'].nda[ii]} doesn't match expected size {self.decoded_values[bc]['waveform']['wf_len']}. "
194
- "Skipping packet"
195
- )
196
-
197
- tbl["waveform"]["values"].nda[ii] = np.frombuffer(
198
- packet[23:], dtype=np.uint16
174
+ tbl["energy_calibrated"].nda[ii] = np.frombuffer(
175
+ packet[12:20], dtype=np.float64
176
+ )[0]
177
+ bytes_read += 8
178
+
179
+ # now handle the energy short
180
+ if int(header["energy_short"].value) == 1:
181
+ tbl["energy_short"].nda[ii] = np.frombuffer(
182
+ packet[bytes_read : bytes_read + 2], dtype=np.uint16
183
+ )[0]
184
+ bytes_read += 2
185
+ else:
186
+ tbl["energy_short"].nda[ii] = 0
187
+
188
+ tbl["flags"].nda[ii] = np.frombuffer(
189
+ packet[bytes_read : bytes_read + 4], np.uint32
190
+ )[0]
191
+ bytes_read += 5 # skip over the waveform code
192
+ tbl["num_samples"].nda[ii] = np.frombuffer(
193
+ packet[bytes_read : bytes_read + 4], dtype=np.uint32
194
+ )[0]
195
+ bytes_read += 4
196
+
197
+ if (
198
+ tbl["num_samples"].nda[ii] != self.decoded_values[bc]["waveform"]["wf_len"]
199
+ ): # make sure that the waveform we read in is the same length as in the config
200
+ raise RuntimeError(
201
+ f"Waveform size {tbl['num_samples'].nda[ii]} doesn't match expected size {self.decoded_values[bc]['waveform']['wf_len']}. "
202
+ "Skipping packet"
199
203
  )
200
204
 
205
+ tbl["waveform"]["values"].nda[ii] = np.frombuffer(
206
+ packet[bytes_read:], dtype=np.uint16
207
+ )
208
+
201
209
  evt_rbkd[bc].loc += 1
202
210
  return evt_rbkd[bc].is_full()
@@ -0,0 +1,247 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+
5
+ import lgdo
6
+ import numpy as np
7
+
8
+ from ..data_decoder import DataDecoder
9
+ from .compass_config_parser import compass_config_to_struct
10
+
11
+ log = logging.getLogger(__name__)
12
+
13
+
14
class CompassHeaderDecoder(DataDecoder):
    """
    Decode CoMPASS header data. Also, read in CoMPASS config data if provided
    using the compass_config_parser.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # compass_config_to_struct always returns a struct, so None marks
        # "header not decoded yet"
        self.config = None

    def decode_header(self, in_stream, config_file: str = None) -> lgdo.Struct:
        """Decode the CoMPASS file header, and add CoMPASS config data to the
        header, if present.

        Parameters
        ----------
        in_stream
            the binary stream of data to have its header decoded
        config_file
            the config file for the CoMPASS data, if present

        Returns
        -------
        config
            an ``lgdo.Struct`` containing the header information, as well as
            the important config information of wf_len and
            num_enabled_channels
        """
        # flags downstream decoders need; determined either from the 2-byte
        # file header or by sniffing the packet layout
        config_names = [
            "energy_channels",  # energy is given in channels (0: false, 1: true)
            "energy_calibrated",  # energy is given in keV/MeV, according to the calibration (0: false, 1: true)
            "energy_short",  # energy short is present (0: false, 1: true)
            "waveform_samples",  # waveform samples are present (0: false, 1: true)
            "header_present",  # there is a 2 byte header present in the file (0: false, 1: true)
        ]

        # CoMPASS specs say that every file should start with a 0xCAEx header,
        # but if CoMPASS writes size-limited files, then this header may not be
        # present in *all* files...
        header_in_bytes = in_stream.read(2)
        if len(header_in_bytes) < 2:
            raise RuntimeError("in_stream ended before a file header could be read")

        if header_in_bytes[-1] == 0xCA:
            header_dict, wf_len = self._decode_present_header(
                header_in_bytes, in_stream, config_file
            )
        else:
            header_dict, wf_len = self._sniff_headerless_layout(
                header_in_bytes, in_stream
            )

        self.config = compass_config_to_struct(config_file, wf_len)

        for name in config_names:
            if name in self.config:
                log.warning(f"{name} already in self.config. skipping...")
                continue
            value = int(header_dict[name])
            self.config.add_field(
                str(name), lgdo.Scalar(value)
            )  # self.config is a struct

        return self.config

    def _decode_present_header(self, header_in_bytes, in_stream, config_file):
        """Decode the 2-byte 0xCAEx bit-field header.

        Returns the flag dict and the waveform length (``None`` when a config
        file will provide the length instead).
        """
        log.debug("header is present in file.")
        # reverse the binary string so index 0 is bit 0, index 1 is bit 1, ...
        header_as_list = bin(int.from_bytes(header_in_bytes, byteorder="little"))[::-1]
        energy_channels = int(header_as_list[0]) == 1
        energy_calibrated = int(header_as_list[1]) == 1
        header_dict = {
            "energy_channels": energy_channels,
            "energy_calibrated": energy_calibrated,
            # both flags set means energies are stored both in ADC channels
            # and calibrated (spelled out explicitly; the previous expression
            # depended on `&`/`==` precedence and chained comparison)
            "energy_channels_calibrated": energy_channels and energy_calibrated,
            "energy_short": int(header_as_list[2]) == 1,
            "waveform_samples": int(header_as_list[3]) == 1,
            "header_present": True,
        }

        # if we don't have the wf_len from a config file, get it from the
        # first packet now
        wf_len = None
        if config_file is None:
            if not header_dict["waveform_samples"]:
                wf_len = 0
            else:
                wf_byte_len = 4
                bytes_to_read = (
                    12  # covers 2-byte board, 2-byte channel, 8-byte time stamp
                )
                bytes_to_read += (
                    2 * header_dict["energy_channels"]
                    + 8 * header_dict["energy_calibrated"]
                    + 2 * header_dict["energy_short"]
                )
                bytes_to_read += 4 + 1  # 4-byte flags, 1-byte waveform code
                first_bytes = in_stream.read(bytes_to_read + wf_byte_len)

                wf_len = np.frombuffer(
                    first_bytes[bytes_to_read : bytes_to_read + wf_byte_len],
                    dtype=np.uint32,
                )[0]
        return header_dict, wf_len

    def _sniff_headerless_layout(self, header_in_bytes, in_stream):
        """Infer the packet layout when the 2-byte file header is absent.

        Tries every combination of energy-field width (ADC, calibrated, or
        both) and energy-short presence, accepting the first combination for
        which the waveform lengths of two consecutive packets agree.

        Raises
        ------
        RuntimeError
            if no combination of field widths matches the data
        """
        # if the 2-byte header is not present, then we have read in the board
        # by accident; read in the 2-byte channel and 8-byte timestamp
        header_in_bytes += in_stream.read(10)
        bytes_read = 12
        fixed_header_start_len = (
            12  # always 12 bytes: 2-byte board, 2-byte channel, 8-byte timestamp
        )
        possible_energy_header_byte_lengths = [2, 8, 10]  # ADC, calibrated, or both
        possible_energy_short_header_byte_lengths = [0, 2]  # energy short or not
        fixed_header_part = 5  # 5 bytes from flags + waveform code
        wf_len_bytes = 4  # wf_len is 4 bytes long

        for prefix in possible_energy_header_byte_lengths:
            for suffix in possible_energy_short_header_byte_lengths:
                # full metadata length of one packet under this hypothesis
                meta_len = (
                    fixed_header_start_len
                    + prefix
                    + suffix
                    + fixed_header_part
                    + wf_len_bytes
                )

                # ---- first packet: read up to and including its wf_len field;
                # don't read more than we have to
                difference = meta_len - bytes_read
                if difference > 0:
                    header_in_bytes += in_stream.read(difference)
                    bytes_read += difference

                wf_len_guess = np.frombuffer(
                    header_in_bytes[meta_len - wf_len_bytes : meta_len],
                    dtype=np.uint32,
                )[0]

                # read in the first packet's waveform data; read exactly the
                # missing bytes so bytes_read stays in sync with
                # len(header_in_bytes) across loop iterations
                difference = meta_len + 2 * wf_len_guess - bytes_read
                if difference > 0:
                    header_in_bytes += in_stream.read(difference)
                    bytes_read += difference

                # ------ second packet header ----------
                difference = 2 * meta_len + 2 * wf_len_guess - bytes_read
                if difference > 0:
                    header_in_bytes += in_stream.read(difference)
                    bytes_read += difference

                second_wf_len_end = 2 * meta_len + 2 * wf_len_guess
                wf_len_guess_2 = np.frombuffer(
                    header_in_bytes[
                        second_wf_len_end - wf_len_bytes : second_wf_len_end
                    ],
                    dtype=np.uint32,
                )[0]

                # if the waveform lengths agree, then we can stride packets
                # correctly
                if wf_len_guess_2 == wf_len_guess:
                    return {
                        "energy_channels": prefix == 2,
                        "energy_calibrated": prefix == 8,
                        "energy_channels_calibrated": prefix == 10,
                        "energy_short": suffix == 2,
                        # use the matched guess (wf_len itself is still unset
                        # at this point)
                        "waveform_samples": wf_len_guess != 0,
                        "header_present": False,
                    }, wf_len_guess

        raise RuntimeError(
            "could not infer the packet layout of the headerless CoMPASS file"
        )

    def make_lgdo(self, key: int = None, size: int = None) -> lgdo.Struct:
        """Return the decoded header struct; decode_header must run first."""
        if self.config is None:
            raise RuntimeError(
                "self.config still None, need to decode header before calling make_lgdo"
            )
        return self.config  # self.config is already an lgdo, namely it is a struct
@@ -72,22 +72,6 @@ class CompassStreamer(DataStreamer):
72
72
  So, we must read this header once, and then proceed to read packets in.
73
73
  """
74
74
  # If a config file is not present, the wf_len can be determined by opening the first few bytes of the in_stream
75
- wf_len = None
76
- if self.compass_config_file is None:
77
- self.set_in_stream(stream_name)
78
-
79
- first_bytes = self.in_stream.read(27)
80
-
81
- energy_short = str(
82
- bin(int.from_bytes(first_bytes[:2], byteorder="little"))
83
- )[::-1][2]
84
-
85
- if int(energy_short) == 1:
86
- [wf_len] = np.frombuffer(first_bytes[23:27], dtype=np.uint32)
87
- else:
88
- [wf_len] = np.frombuffer(first_bytes[21:25], dtype=np.uint32)
89
-
90
- self.close_stream()
91
75
 
92
76
  # set the in_stream
93
77
  self.set_in_stream(stream_name)
@@ -95,11 +79,20 @@ class CompassStreamer(DataStreamer):
95
79
 
96
80
  # read in and decode the file header info, passing the compass_config_file, if present
97
81
  self.header = self.header_decoder.decode_header(
98
- self.in_stream, self.compass_config_file, wf_len
82
+ self.in_stream, self.compass_config_file
99
83
  ) # returns an lgdo.Struct
100
- self.n_bytes_read += (
101
- 2 # there are 2 bytes in the header, for a 16 bit number to read out
102
- )
84
+ self.close_stream()
85
+
86
+ # Now we are ready to read the data
87
+ self.set_in_stream(stream_name)
88
+ self.n_bytes_read = 0
89
+
90
+ if int(self.header["header_present"].value) == 1:
91
+ # read 2 bytes if we need to
92
+ self.in_stream.read(2)
93
+ self.n_bytes_read += (
94
+ 2 # there are 2 bytes in the header, for a 16 bit number to read out
95
+ )
103
96
 
104
97
  # set up data loop variables
105
98
  self.packet_id = 0
@@ -171,16 +164,34 @@ class CompassStreamer(DataStreamer):
171
164
  if self.in_stream is None:
172
165
  raise RuntimeError("self.in_stream is None")
173
166
 
174
- if (self.packet_id == 0) and (self.n_bytes_read != 2):
167
+ if (
168
+ (self.packet_id == 0)
169
+ and (self.n_bytes_read != 2)
170
+ and (int(self.header["header_present"].value) == 1)
171
+ ):
175
172
  raise RuntimeError(
176
173
  f"The 2 byte filer header was not converted, instead read in {self.n_bytes_read} for the file header"
177
174
  )
175
+ if (
176
+ (self.packet_id == 0)
177
+ and (self.n_bytes_read != 0)
178
+ and (int(self.header["header_present"].value) == 0)
179
+ ):
180
+ raise RuntimeError(
181
+ f"The header was not converted, instead read in {self.n_bytes_read} for the file header"
182
+ )
178
183
 
179
- # packets have metadata of variable lengths, depending on if the header shows that energy_short is present in the metadata
184
+ # packets have metadata of variable lengths, depending on what the header shows
185
+ header_length = 12 # header always starts with 2-bytes of board, 2-bytes of channel, and 8-bytes of timestamp
186
+ if int(self.header["energy_channels"].value) == 1:
187
+ header_length += 2 # if the energy is recorded in ADC channels, then there are an extra 2 bytes in the metadata
188
+ if int(self.header["energy_calibrated"].value) == 1:
189
+ header_length += 8 # if the energy is recorded in keV/MeV, then there are an extra 8 bytes in the metadata
180
190
  if int(self.header["energy_short"].value) == 1:
181
- header_length = 25 # if the energy short is present, then there are an extra 2 bytes in the metadata
182
- else:
183
- header_length = 23 # the normal packet metadata is 23 bytes long
191
+ header_length += 2 # if the energy short is present, then there are an extra 2 bytes in the metadata
192
+ header_length += (
193
+ 4 + 1 + 4
194
+ ) # the flags, the waveform code bytes, and the waveform length
184
195
 
185
196
  # read the packet's metadata into the buffer
186
197
  pkt_hdr = self.buffer[:header_length]
@@ -190,16 +201,11 @@ class CompassStreamer(DataStreamer):
190
201
  # return None once we run out of file
191
202
  if n_bytes_read == 0:
192
203
  return None
193
- if (n_bytes_read != 25) and (n_bytes_read != 23):
204
+ if n_bytes_read not in [23, 25, 29, 31, 33]:
194
205
  raise RuntimeError(f"only got {n_bytes_read} bytes for packet header")
195
206
 
196
- # get the waveform length so we can read in the rest of the packet
197
- if n_bytes_read == 25:
198
- [num_samples] = np.frombuffer(pkt_hdr[21:25], dtype=np.uint32)
199
- pkt_length = header_length + 2 * num_samples
200
- if n_bytes_read == 23:
201
- [num_samples] = np.frombuffer(pkt_hdr[19:23], dtype=np.uint32)
202
- pkt_length = header_length + 2 * num_samples
207
+ [num_samples] = np.frombuffer(pkt_hdr[-4:], dtype=np.uint32)
208
+ pkt_length = header_length + 2 * num_samples
203
209
 
204
210
  # load into buffer, resizing as necessary
205
211
  if len(self.buffer) < pkt_length:
@@ -152,24 +152,41 @@ class FCEventDecoder(DataDecoder):
152
152
  tbl["board_id"].nda[loc] = fcio.event.card_address[ii]
153
153
  tbl["fc_input"].nda[loc] = fcio.event.card_channel[ii]
154
154
  tbl["event_type"].nda[loc] = fcio.event.type
155
- tbl["eventnumber"].nda[loc] = fcio.event.timestamp[0]
156
155
  tbl["numtraces"].nda[loc] = fcio.event.num_traces
157
- tbl["ts_pps"].nda[loc] = fcio.event.timestamp[1]
158
- tbl["ts_ticks"].nda[loc] = fcio.event.timestamp[2]
159
- tbl["ts_maxticks"].nda[loc] = fcio.event.timestamp[3]
160
- tbl["mu_offset_sec"].nda[loc] = fcio.event.timeoffset[0]
161
- tbl["mu_offset_usec"].nda[loc] = fcio.event.timeoffset[1]
162
- tbl["to_master_sec"].nda[loc] = fcio.event.timeoffset[2]
163
- tbl["delta_mu_usec"].nda[loc] = fcio.event.timeoffset[3]
164
- tbl["abs_delta_mu_usec"].nda[loc] = fcio.event.timeoffset[4]
165
- tbl["to_start_sec"].nda[loc] = fcio.event.timeoffset[5]
166
- tbl["to_start_usec"].nda[loc] = fcio.event.timeoffset[6]
167
- tbl["dr_start_pps"].nda[loc] = fcio.event.deadregion[0]
168
- tbl["dr_start_ticks"].nda[loc] = fcio.event.deadregion[1]
169
- tbl["dr_stop_pps"].nda[loc] = fcio.event.deadregion[2]
170
- tbl["dr_stop_ticks"].nda[loc] = fcio.event.deadregion[3]
171
- tbl["dr_maxticks"].nda[loc] = fcio.event.deadregion[4]
172
- # if event_type == 11: provides the same check
156
+
157
+ # the order of names is crucial here!
158
+ timestamp_names = [
159
+ "eventnumber",
160
+ "ts_pps",
161
+ "ts_ticks",
162
+ "ts_maxticks",
163
+ ]
164
+ for name, value in zip(timestamp_names, fcio.event.timestamp):
165
+ tbl[name].nda[loc] = value
166
+
167
+ timeoffset_names = [
168
+ "mu_offset_sec",
169
+ "mu_offset_usec",
170
+ "to_master_sec",
171
+ "delta_mu_usec",
172
+ "abs_delta_mu_usec",
173
+ "to_start_sec",
174
+ "to_start_usec",
175
+ ]
176
+ for name, value in zip(timeoffset_names, fcio.event.timeoffset):
177
+ tbl[name].nda[loc] = value
178
+
179
+ deadregion_names = [
180
+ "dr_start_pps",
181
+ "dr_start_ticks",
182
+ "dr_stop_pps",
183
+ "dr_stop_ticks",
184
+ "dr_maxticks",
185
+ ]
186
+ for name, value in zip(deadregion_names, fcio.event.deadregion[:5]):
187
+ tbl[name].nda[loc] = value
188
+
189
+ # if event_type == 11: would provide the same check
173
190
  # however, the usage of deadregion[5]/[6] must never change
174
191
  # and it will always be present if deadregion[7..] is ever used
175
192
  if fcio.event.deadregion_size >= 7:
@@ -179,7 +196,7 @@ class FCEventDecoder(DataDecoder):
179
196
  tbl["dr_ch_idx"].nda[loc] = 0
180
197
  tbl["dr_ch_len"].nda[loc] = fcio.config.adcs
181
198
 
182
- # The following values are calculated values by fcio-py
199
+ # The following values are calculated by fcio-py, derived from fields above.
183
200
  tbl["timestamp"].nda[loc] = fcio.event.unix_time_utc_sec
184
201
  tbl["deadinterval_nsec"].nda[loc] = fcio.event.dead_interval_nsec[ii]
185
202
  tbl["deadtime"].nda[loc] = fcio.event.dead_time_sec[ii]
@@ -14,9 +14,12 @@ log = logging.getLogger(__name__)
14
14
 
15
15
def get_key(streamid: int, card_address: int, card_input: int, iwf: int = -1) -> int:
    """Build the integer raw-buffer key for a FlashCam channel.

    When a valid waveform index is supplied for a non-positive stream id, the
    index itself is the key; otherwise the key packs stream id, card address,
    and card input into decimal fields.
    """
    if streamid <= 0 and iwf >= 0:
        return iwf
    # For backwards compatibility only the lower 16-bit of the streamid are used.
    masked_id = int(streamid) & 0xFFFF
    return masked_id * 1000000 + int(card_address) * 100 + int(card_input)
25
 
@@ -8,7 +8,7 @@ import logging
8
8
  import numpy as np
9
9
 
10
10
  from ..data_streamer import DataStreamer
11
- from ..raw_buffer import RawBuffer, RawBufferLibrary
11
+ from ..raw_buffer import RawBuffer, RawBufferLibrary, RawBufferList
12
12
  from . import orca_packet
13
13
  from .orca_base import OrcaDecoder
14
14
  from .orca_digitizers import ( # noqa: F401
@@ -351,6 +351,18 @@ class OrcaStreamer(DataStreamer):
351
351
  if rb_lib is not None and "*" not in rb_lib:
352
352
  keep_decoders = []
353
353
  for name in decoder_names:
354
+ # Decoding ORFCIO streams requires decoding ORFCIOConfig packets,
355
+ # as it opens the wrapped fcio stream (in orca_fcio.py) and decodes the fields
356
+ # required for the other FCIO packets.
357
+ # With `out_stream == ''` the lgdo buffer will be allocated, and the packet
358
+ # decoded, but not written to the out_stream the user defined.
359
+ if name == "ORFCIOConfigDecoder" and name not in rb_lib:
360
+ rb_lib[name] = RawBufferList()
361
+ rb_lib[name].append(
362
+ RawBuffer(
363
+ lgdo=None, key_list=["*"], out_name="{key}", out_stream=""
364
+ )
365
+ )
354
366
  if name in rb_lib:
355
367
  keep_decoders.append(name)
356
368
  decoder_names = keep_decoders
@@ -366,8 +378,7 @@ class OrcaStreamer(DataStreamer):
366
378
  shift_data_id=False
367
379
  )
368
380
  instantiated_decoders = {"OrcaHeaderDecoder": self.header_decoder}
369
- for data_id in id_to_dec_name_dict.keys():
370
- name = id_to_dec_name_dict[data_id]
381
+ for data_id, name in id_to_dec_name_dict.items():
371
382
  if name not in instantiated_decoders:
372
383
  if name not in globals():
373
384
  self.missing_decoders.append(data_id)
@@ -384,8 +395,6 @@ class OrcaStreamer(DataStreamer):
384
395
  chunk_mode=chunk_mode,
385
396
  out_stream=out_stream,
386
397
  )
387
- if rb_lib is None:
388
- rb_lib = self.rb_lib
389
398
  good_buffers = []
390
399
  for data_id in self.decoder_id_dict.keys():
391
400
  name = id_to_dec_name_dict[data_id]
@@ -401,8 +410,8 @@ class OrcaStreamer(DataStreamer):
401
410
  log.debug(f"rb_lib = {self.rb_lib}")
402
411
 
403
412
  # return header raw buffer
404
- if "OrcaHeaderDecoder" in rb_lib:
405
- header_rb_list = rb_lib["OrcaHeaderDecoder"]
413
+ if "OrcaHeaderDecoder" in self.rb_lib:
414
+ header_rb_list = self.rb_lib["OrcaHeaderDecoder"]
406
415
  if len(header_rb_list) != 1:
407
416
  log.warning(
408
417
  f"header_rb_list had length {len(header_rb_list)}, ignoring all but the first"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: legend-daq2lh5
3
- Version: 1.6.2
3
+ Version: 1.6.3
4
4
  Summary: Convert digitizer data to LH5
5
5
  Author-email: Jason Detwiler <jasondet@uw.edu>
6
6
  Maintainer: The LEGEND collaboration
@@ -40,7 +40,6 @@ def compass_config_no_settings(lgnd_test_data):
40
40
  test_file = lgnd_test_data.get_path("compass/compass_test_data.BIN")
41
41
  in_stream = open(test_file, "rb")
42
42
  compass_config_file = None
43
- wf_len = 1000
44
- decoder.decode_header(in_stream, compass_config_file, wf_len)
43
+ decoder.decode_header(in_stream, compass_config_file)
45
44
  in_stream.close()
46
45
  return decoder.config
@@ -46,6 +46,7 @@ def test_values(compass_config):
46
46
  "energy_channels": 1,
47
47
  "energy_short": 1,
48
48
  "waveform_samples": 1,
49
+ "header_present": 1,
49
50
  }
50
51
  # We have a nested struct, so we need some nasty recursion
51
52
  for key in compass_config.keys():
@@ -136,6 +137,7 @@ def test_values_no_config(compass_config_no_settings):
136
137
  "energy_channels": 1,
137
138
  "energy_short": 1,
138
139
  "waveform_samples": 1,
140
+ "header_present": 1,
139
141
  }
140
142
  # We have a nested struct, so we need some nasty recursion
141
143
  for key in compass_config_no_settings.keys():
@@ -48,7 +48,7 @@ def test_orfcio_config_decoding_swt(orca_stream_fcio_swt, fcio_swt_packets):
48
48
  assert name == "ORFCIOConfigDecoder"
49
49
 
50
50
 
51
- def test_orfcio_waveform_decoding_swt(orca_stream_fcio_swt, fcio_swt_packets):
51
+ def test_orfcio_eventheader_decoding_swt(orca_stream_fcio_swt, fcio_swt_packets):
52
52
  wf_packet = fcio_swt_packets[1]
53
53
  assert wf_packet is not None
54
54
 
@@ -9,6 +9,7 @@ import lgdo.lh5 as lh5
9
9
  import numpy as np
10
10
 
11
11
  from daq2lh5 import build_raw
12
+ from daq2lh5.fc.fc_event_decoder import get_key
12
13
  from daq2lh5.orca import orca_streamer
13
14
  from daq2lh5.orca.orca_flashcam import ORFlashCamListenerConfigDecoder
14
15
  from daq2lh5.orca.orca_run_decoder import ORRunDecoderForRun
@@ -265,3 +266,71 @@ def test_daq_to_raw(lgnd_test_data, tmptestdir):
265
266
  # assert the byte strings are the same
266
267
  assert len(rebuilt_orca_data) == len(orig_orca_data)
267
268
  assert rebuilt_orca_data == orig_orca_data
269
+
270
+
271
+ def test_daq_to_raw_orfcio_outspec(lgnd_test_data, tmptestdir):
272
+ """Test function for the daq to raw validation."""
273
+
274
+ # open orca daq file and create LH5 file
275
+ orca_file = lgnd_test_data.get_path(
276
+ "orca/fcio/l200-p14-r004-cal-20250606T010224Z.orca"
277
+ )
278
+
279
+ filekey = f"{tmptestdir}/l200-p14-r004-cal-20250606T010224Z.lh5"
280
+
281
+ expected_top_level_keys = [
282
+ "ch1105600",
283
+ "ch1105604",
284
+ "ch1107202",
285
+ "ch1113600",
286
+ "ch1115200",
287
+ "ch1115205",
288
+ ]
289
+
290
+ # Test minimal waveform out_spec. Test ORFCIOConfigDecoder hidden auto-decoding in build_raw
291
+
292
+ out_spec = {
293
+ "ORFCIOEventDecoder": {
294
+ "ch{key:07d}/raw": {
295
+ "key_list": ["*"],
296
+ "out_stream": f"{filekey}",
297
+ }
298
+ }
299
+ }
300
+
301
+ build_raw(
302
+ orca_file,
303
+ in_stream_type="ORCA",
304
+ out_spec=out_spec,
305
+ overwrite=True,
306
+ )
307
+
308
+ top_level_keys = lh5.ls(filekey, "/")
309
+ assert top_level_keys == expected_top_level_keys
310
+
311
+ # Test default out_spec: filekey is out_stream, no spec given.
312
+
313
+ build_raw(
314
+ orca_file,
315
+ in_stream_type="ORCA",
316
+ out_spec=filekey,
317
+ overwrite=True,
318
+ )
319
+
320
+ top_level_keys = lh5.ls(filekey, "/")
321
+
322
+ rawid_components = lh5.read(
323
+ "ORFCIOEvent_0", filekey, field_mask=["board_id", "fc_input", "fcid"]
324
+ )
325
+ auto_decoded_keys = sorted(
326
+ {
327
+ f"ch{get_key(fcid, board_id, fc_input)}"
328
+ for fcid, board_id, fc_input in zip(
329
+ rawid_components["fcid"],
330
+ rawid_components["board_id"],
331
+ rawid_components["fc_input"],
332
+ )
333
+ }
334
+ )
335
+
336
+ assert auto_decoded_keys == expected_top_level_keys
@@ -1,74 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import logging
4
-
5
- import lgdo
6
-
7
- from ..data_decoder import DataDecoder
8
- from .compass_config_parser import compass_config_to_struct
9
-
10
- log = logging.getLogger(__name__)
11
-
12
-
13
- class CompassHeaderDecoder(DataDecoder):
14
- """
15
- Decode CoMPASS header data. Also, read in CoMPASS config data if provided using the compass_config_parser
16
- """
17
-
18
- def __init__(self, *args, **kwargs) -> None:
19
- super().__init__(*args, **kwargs)
20
- self.config = None # initialize to none, because compass_config_to_struct always returns a struct
21
-
22
- def decode_header(
23
- self, in_stream: bytes, config_file: str = None, wf_len: int = None
24
- ) -> dict:
25
- """Decode the CoMPASS file header, and add CoMPASS config data to the header, if present.
26
-
27
- Parameters
28
- ----------
29
- in_stream
30
- The stream of data to have its header decoded
31
- config_file
32
- The config file for the CoMPASS data, if present
33
- wf_len
34
- The length of the first waveform in the file, only pre-calculated when the config_file is none
35
-
36
- Returns
37
- -------
38
- config
39
- A dict containing the header information, as well as the important config information
40
- of wf_len and num_enabled_channels
41
- """
42
- self.config = compass_config_to_struct(config_file, wf_len)
43
-
44
- config_names = [
45
- "energy_channels", # energy is given in channels (0: false, 1: true)
46
- "energy_calibrated", # energy is given in keV/MeV, according to the calibration (0: false, 1: true)
47
- "energy_short", # energy short is present (0: false, 1: true)
48
- "waveform_samples", # waveform samples are present (0: false, 1: true)
49
- ]
50
- header_in_bytes = in_stream.read(
51
- 2
52
- ) # we always have to read in the first 2 bytes of the header for CoMPASS v2 files
53
- header_in_binary = bin(int.from_bytes(header_in_bytes, byteorder="little"))
54
- header_as_list = str(header_in_binary)[
55
- ::-1
56
- ] # reverse it as we care about bit 0, bit 1, etc.
57
-
58
- for i, name in enumerate(config_names):
59
- if name in self.config:
60
- log.warning(f"{name} already in self.config. skipping...")
61
- continue
62
- value = int(header_as_list[i])
63
- self.config.add_field(
64
- str(name), lgdo.Scalar(value)
65
- ) # self.config is a struct
66
-
67
- return self.config
68
-
69
- def make_lgdo(self, key: int = None, size: int = None) -> lgdo.Struct:
70
- if self.config is None:
71
- raise RuntimeError(
72
- "self.config still None, need to decode header before calling make_lgdo"
73
- )
74
- return self.config # self.config is already an lgdo, namely it is a struct
File without changes
File without changes
File without changes