legend-daq2lh5 1.2.2__tar.gz → 1.4.0__tar.gz

Sign up to get free protection for your applications and to get access to all the features.
Files changed (78) hide show
  1. {legend_daq2lh5-1.2.2/src/legend_daq2lh5.egg-info → legend_daq2lh5-1.4.0}/PKG-INFO +3 -2
  2. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/setup.cfg +1 -0
  3. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/_version.py +2 -2
  4. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/buffer_processor/lh5_buffer_processor.py +7 -5
  5. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/build_raw.py +19 -14
  6. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/data_streamer.py +1 -1
  7. legend_daq2lh5-1.4.0/src/daq2lh5/llama/llama_base.py +14 -0
  8. legend_daq2lh5-1.4.0/src/daq2lh5/llama/llama_event_decoder.py +328 -0
  9. legend_daq2lh5-1.4.0/src/daq2lh5/llama/llama_header_decoder.py +149 -0
  10. legend_daq2lh5-1.4.0/src/daq2lh5/llama/llama_streamer.py +156 -0
  11. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/logging.py +2 -0
  12. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/raw_buffer.py +78 -82
  13. legend_daq2lh5-1.4.0/src/daq2lh5/utils.py +35 -0
  14. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0/src/legend_daq2lh5.egg-info}/PKG-INFO +3 -2
  15. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/legend_daq2lh5.egg-info/SOURCES.txt +9 -0
  16. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/legend_daq2lh5.egg-info/requires.txt +1 -0
  17. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/conftest.py +1 -1
  18. legend_daq2lh5-1.4.0/tests/llama/conftest.py +7 -0
  19. legend_daq2lh5-1.4.0/tests/llama/test_llama_event_decoder.py +127 -0
  20. legend_daq2lh5-1.4.0/tests/llama/test_llama_header_decoder.py +16 -0
  21. legend_daq2lh5-1.4.0/tests/llama/test_llama_streamer.py +36 -0
  22. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/test_raw_buffer.py +3 -3
  23. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/LICENSE +0 -0
  24. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/README.md +0 -0
  25. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/pyproject.toml +0 -0
  26. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/setup.py +0 -0
  27. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/__init__.py +0 -0
  28. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/buffer_processor/__init__.py +0 -0
  29. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/buffer_processor/buffer_processor.py +0 -0
  30. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/cli.py +0 -0
  31. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/compass/__init__.py +0 -0
  32. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/compass/compass_config_parser.py +0 -0
  33. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/compass/compass_event_decoder.py +0 -0
  34. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/compass/compass_header_decoder.py +0 -0
  35. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/compass/compass_streamer.py +0 -0
  36. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/data_decoder.py +0 -0
  37. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/fc/__init__.py +0 -0
  38. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/fc/fc_config_decoder.py +0 -0
  39. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/fc/fc_event_decoder.py +0 -0
  40. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/fc/fc_status_decoder.py +0 -0
  41. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/fc/fc_streamer.py +0 -0
  42. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/orca/__init__.py +0 -0
  43. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/orca/orca_base.py +0 -0
  44. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/orca/orca_digitizers.py +0 -0
  45. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/orca/orca_flashcam.py +0 -0
  46. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/orca/orca_header.py +0 -0
  47. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/orca/orca_header_decoder.py +0 -0
  48. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/orca/orca_packet.py +0 -0
  49. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/orca/orca_run_decoder.py +0 -0
  50. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/daq2lh5/orca/orca_streamer.py +0 -0
  51. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/legend_daq2lh5.egg-info/dependency_links.txt +0 -0
  52. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/legend_daq2lh5.egg-info/entry_points.txt +0 -0
  53. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/legend_daq2lh5.egg-info/not-zip-safe +0 -0
  54. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/src/legend_daq2lh5.egg-info/top_level.txt +0 -0
  55. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/buffer_processor/test_buffer_processor.py +0 -0
  56. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/buffer_processor/test_buffer_processor_configs/buffer_processor_config.json +0 -0
  57. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/buffer_processor/test_buffer_processor_configs/lh5_buffer_processor_config.json +0 -0
  58. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/buffer_processor/test_buffer_processor_configs/raw_out_spec_no_proc.json +0 -0
  59. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/buffer_processor/test_lh5_buffer_processor.py +0 -0
  60. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/compass/conftest.py +0 -0
  61. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/compass/test_compass_event_decoder.py +0 -0
  62. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/compass/test_compass_header_decoder.py +0 -0
  63. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/compass/test_compass_streamer.py +0 -0
  64. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/configs/fc-out-spec.json +0 -0
  65. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/configs/orca-out-spec-cli.json +0 -0
  66. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/configs/orca-out-spec.json +0 -0
  67. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/fc/conftest.py +0 -0
  68. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/fc/test_fc_config_decoder.py +0 -0
  69. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/fc/test_fc_event_decoder.py +0 -0
  70. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/fc/test_fc_status_decoder.py +0 -0
  71. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/fc/test_fc_streamer.py +0 -0
  72. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/orca/conftest.py +0 -0
  73. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/orca/test_or_run_decoder_for_run.py +0 -0
  74. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/orca/test_orca_fc.py +0 -0
  75. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/orca/test_orca_packet.py +0 -0
  76. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/test_build_raw.py +0 -0
  77. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/test_cli.py +0 -0
  78. {legend_daq2lh5-1.2.2 → legend_daq2lh5-1.4.0}/tests/test_daq_to_raw.py +0 -0
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.2
2
2
  Name: legend_daq2lh5
3
- Version: 1.2.2
3
+ Version: 1.4.0
4
4
  Summary: Convert digitizer data to LH5
5
5
  Home-page: https://github.com/legend-exp/legend-daq2lh5
6
6
  Author: Jason Detwiler
@@ -32,6 +32,7 @@ Requires-Dist: hdf5plugin
32
32
  Requires-Dist: legend-pydataobj>=1.6
33
33
  Requires-Dist: numpy>=1.21
34
34
  Requires-Dist: pyfcutils
35
+ Requires-Dist: pyyaml
35
36
  Requires-Dist: tqdm>=4.27
36
37
  Requires-Dist: xmltodict
37
38
  Provides-Extra: all
@@ -37,6 +37,7 @@ install_requires =
37
37
  legend-pydataobj>=1.6
38
38
  numpy>=1.21
39
39
  pyfcutils
40
+ pyyaml
40
41
  tqdm>=4.27
41
42
  xmltodict
42
43
  python_requires = >=3.9
@@ -12,5 +12,5 @@ __version__: str
12
12
  __version_tuple__: VERSION_TUPLE
13
13
  version_tuple: VERSION_TUPLE
14
14
 
15
- __version__ = version = '1.2.2'
16
- __version_tuple__ = version_tuple = (1, 2, 2)
15
+ __version__ = version = '1.4.0'
16
+ __version_tuple__ = version_tuple = (1, 4, 0)
@@ -1,6 +1,5 @@
1
1
  from __future__ import annotations
2
2
 
3
- import json
4
3
  import logging
5
4
  import os
6
5
 
@@ -8,6 +7,7 @@ import h5py
8
7
  import lgdo
9
8
  from lgdo import lh5
10
9
 
10
+ from .. import utils
11
11
  from ..buffer_processor.buffer_processor import buffer_processor
12
12
  from ..raw_buffer import RawBuffer, RawBufferLibrary
13
13
 
@@ -104,11 +104,13 @@ def lh5_buffer_processor(
104
104
  raw_store.gimme_file(proc_file_name, "a")
105
105
 
106
106
  # Do key expansion on the out_spec
107
- if isinstance(out_spec, str) and out_spec.endswith(".json"):
108
- with open(out_spec) as json_file:
109
- out_spec = json.load(json_file)
107
+ allowed_exts = [ext for exts in utils.__file_extensions__.values() for ext in exts]
108
+ if isinstance(out_spec, str) and any(
109
+ [out_spec.endswith(ext) for ext in allowed_exts]
110
+ ):
111
+ out_spec = utils.load_dict(out_spec)
110
112
  if isinstance(out_spec, dict):
111
- RawBufferLibrary(json_dict=out_spec)
113
+ RawBufferLibrary(config=out_spec)
112
114
 
113
115
  # Write everything in the raw file to the new file, check for proc_spec under either the group name, out_name, or the name
114
116
  for tb in lh5_tables:
@@ -1,7 +1,6 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  import glob
4
- import json
5
4
  import logging
6
5
  import os
7
6
  import time
@@ -10,8 +9,10 @@ import numpy as np
10
9
  from lgdo import lh5
11
10
  from tqdm.auto import tqdm
12
11
 
12
+ from . import utils
13
13
  from .compass.compass_streamer import CompassStreamer
14
14
  from .fc.fc_streamer import FCStreamer
15
+ from .llama.llama_streamer import LLAMAStreamer
15
16
  from .orca.orca_streamer import OrcaStreamer
16
17
  from .raw_buffer import RawBufferLibrary, write_to_lh5_and_clear
17
18
 
@@ -48,12 +49,14 @@ def build_raw(
48
49
  Specification for the output stream.
49
50
 
50
51
  - if None, uses ``{in_stream}.lh5`` as the output filename.
51
- - if a str not ending in ``.json``, interpreted as the output filename.
52
- - if a str ending in ``.json``, interpreted as a filename containing
53
- json-shorthand for the output specification (see :mod:`.raw_buffer`).
54
- - if a JSON dict, should be a dict loaded from the json shorthand
55
- notation for RawBufferLibraries (see :mod:`.raw_buffer`), which is
56
- then used to build a :class:`.RawBufferLibrary`.
52
+ - if a str not ending with a config file extension, interpreted as the
53
+ output filename.
54
+ - if a str ending with a config file extension, interpreted as a
55
+ filename containing shorthand for the output specification (see
56
+ :mod:`.raw_buffer`).
57
+ - if a dict, should be a dict loaded from the shorthand notation for
58
+ RawBufferLibraries (see :mod:`.raw_buffer`), which is then used to
59
+ build a :class:`.RawBufferLibrary`.
57
60
  - if a :class:`.RawBufferLibrary`, the mapping of data to output file /
58
61
  group is taken from that.
59
62
 
@@ -71,8 +74,8 @@ def build_raw(
71
74
 
72
75
  - if None, CompassDecoder will sacrifice the first packet to determine
73
76
  waveform length
74
- - if a str ending in ``.json``, interpreted as a filename containing
75
- json-shorthand for the output specification (see
77
+ - if a str ending with a config file extension, interpreted as a
78
+ filename containing shorthand for the output specification (see
76
79
  :mod:`.compass.compass_event_decoder`).
77
80
 
78
81
  hdf5_settings
@@ -119,11 +122,13 @@ def build_raw(
119
122
 
120
123
  # process out_spec and setup rb_lib if specified
121
124
  rb_lib = None
122
- if isinstance(out_spec, str) and out_spec.endswith(".json"):
123
- with open(out_spec) as json_file:
124
- out_spec = json.load(json_file)
125
+ allowed_exts = [ext for exts in utils.__file_extensions__.values() for ext in exts]
126
+ if isinstance(out_spec, str) and any(
127
+ [out_spec.endswith(ext) for ext in allowed_exts]
128
+ ):
129
+ out_spec = utils.load_dict(out_spec)
125
130
  if isinstance(out_spec, dict):
126
- out_spec = RawBufferLibrary(json_dict=out_spec, kw_dict=kwargs)
131
+ out_spec = RawBufferLibrary(config=out_spec, kw_dict=kwargs)
127
132
  if isinstance(out_spec, RawBufferLibrary):
128
133
  rb_lib = out_spec
129
134
  # if no rb_lib, write all data to file
@@ -180,7 +185,7 @@ def build_raw(
180
185
  elif in_stream_type == "Compass":
181
186
  streamer = CompassStreamer(compass_config_file)
182
187
  elif in_stream_type == "LlamaDaq":
183
- raise NotImplementedError("LlamaDaq streaming not yet implemented")
188
+ streamer = LLAMAStreamer()
184
189
  elif in_stream_type == "MGDO":
185
190
  raise NotImplementedError("MGDO streaming not yet implemented")
186
191
  else:
@@ -350,7 +350,7 @@ class DataStreamer(ABC):
350
350
  if len(key_list) == 1:
351
351
  this_name = f"{dec_key}_{key_list[0]}"
352
352
  else:
353
- this_name = f"{dec_key}_{ii}"
353
+ this_name = f"{dec_key}_{ii}" # this can cause a name clash e.g. for [[1],[2,3]] ...
354
354
  rb = RawBuffer(
355
355
  key_list=key_list, out_stream=out_stream, out_name=this_name
356
356
  )
@@ -0,0 +1,14 @@
1
+ """
2
+ General utilities for llamaDAQ data decoding
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import logging
8
+
9
+ log = logging.getLogger(__name__)
10
+
11
+
12
def join_fadcid_chid(fadcid: int, chid: int) -> int:
    """Flatten a FADC id and a channel id into one unique key.

    The channel id occupies the low 4 bits, i.e. the result is
    ``fadcid * 16 + chid``.
    """
    return fadcid * 16 + chid
@@ -0,0 +1,328 @@
1
+ from __future__ import annotations
2
+
3
+ import copy
4
+ import logging
5
+ from typing import Any
6
+
7
+ import lgdo
8
+ import numpy as np
9
+
10
+ from ..data_decoder import DataDecoder
11
+ from .llama_header_decoder import LLAMA_Channel_Configs_t
12
+
13
+ log = logging.getLogger(__name__)
14
+
15
# Template of the per-channel decoded values; deep-copied and then extended
# per channel (waveforms, accumulators, MAW, energy) by
# LLAMAEventDecoder.set_channel_configs depending on the channel config.
llama_decoded_values_template = {
    # packet index in file
    "packet_id": {"dtype": "uint32"},
    # combined index of FADC and channel
    "fch_id": {"dtype": "uint32"},
    # time since epoch
    "timestamp": {"dtype": "uint64", "units": "clock_ticks"},
    "status_flag": {"dtype": "uint32"},
    # waveform data --> not always present
    # "waveform": {
    #     "dtype": "uint16",
    #     "datatype": "waveform",
    #     "wf_len": 65532,  # max value. override this before initializing buffers to save RAM
    #     "dt": 8,  # override if a different clock rate is used
    #     "dt_units": "ns",
    #     "t0_units": "ns",
    # }
}
# """Default llamaDAQ SIS3316 Event decoded values.
#
# Warning
# -------
# This configuration will be dynamically modified by the decoder at runtime.
# """
40
+
41
+
42
def check_dict_spec_equal(
    d1: dict[str, Any], d2: dict[str, Any], specs: list[str]
) -> bool:
    """Return ``True`` when *d1* and *d2* hold equal values for every key in *specs*.

    A key missing from a dict compares as ``None``, so a key absent from
    both dicts counts as equal.
    """
    return all(d1.get(spec) == d2.get(spec) for spec in specs)
49
+
50
+
51
+ class LLAMAEventDecoder(DataDecoder):
52
+ """Decode llamaDAQ SIS3316 digitizer event data."""
53
+
54
def __init__(self, *args, **kwargs) -> None:
    """Create the decoder with empty per-channel state.

    Channel settings arrive later through :meth:`set_channel_configs`;
    until then no decoded-values specifications exist.
    """
    # one decoded-values spec per fch_id, read for every event
    # (decode_event) -- settings can differ per channel group
    self.decoded_values: dict[int, dict[str, Any]] = {}
    super().__init__(*args, **kwargs)
    self.skipped_channels = {}
    self.channel_configs = None
    # raw-waveform sample pitch per channel; buffered here to update the
    # t0 of averaged waveforms on every event
    self.dt_raw: dict[int, float] = {}
    # raw-waveform t0 per channel, stored when channel configs are
    # received and used for each waveform
    self.t0_raw: dict[int, float] = {}
    # constant part of the t0 of averaged waveforms
    self.t0_avg_const: dict[int, float] = {}
70
+
71
def set_channel_configs(self, channel_configs: LLAMA_Channel_Configs_t) -> None:
    """Receive channel configurations from the llama_streamer after the header was parsed.

    Adapts ``self.decoded_values`` per channel based on the read
    configuration: waveform entries are sized from the configured trace
    lengths, and accumulator/MAW/energy fields are enabled by the
    channel's format bits.
    """
    self.channel_configs = channel_configs
    for fch, config in self.channel_configs.items():
        self.decoded_values[fch] = copy.deepcopy(llama_decoded_values_template)
        format_bits = config["format_bits"]
        sample_clock_freq = config["sample_freq"]
        avg_mode = config["avg_mode"]
        # sample pitch in ns (assumes sample_freq is in MHz -- TODO confirm)
        dt_raw: float = 1 / sample_clock_freq * 1000
        # averaged samples span 2**(avg_mode + 1) raw samples
        dt_avg: float = dt_raw * (1 << (avg_mode + 1))
        # t0 generation functions from llamaDAQ -> EventConfig.hh
        t0_raw: float = (
            float(config["sample_start_index"]) - float(config["sample_pretrigger"])
        ) * dt_raw  # location of the trigger is at t = 0
        t0_avg: float = (
            -float(config["sample_pretrigger"]) * float(dt_raw)
            - float(config["avg_sample_pretrigger"]) * dt_avg
        )  # additional offset to be added independently for every event
        # buffer per-channel timing so decode_packet can fill waveform t0s
        self.dt_raw[fch] = dt_raw
        self.t0_raw[fch] = t0_raw
        self.t0_avg_const[fch] = t0_avg
        if config["sample_length"] > 0:
            self.__add_waveform(
                self.decoded_values[fch], False, config["sample_length"], dt_raw
            )
        if config["avg_sample_length"] > 0 and avg_mode > 0:
            self.__add_waveform(
                self.decoded_values[fch], True, config["avg_sample_length"], dt_avg
            )
        # format bits switch on optional per-event fields
        if format_bits & 0x01:
            self.__add_accum1till6(self.decoded_values[fch])
        if format_bits & 0x02:
            self.__add_accum7and8(self.decoded_values[fch])
        if format_bits & 0x04:
            self.__add_maw(self.decoded_values[fch])
        if format_bits & 0x08:
            self.__add_energy(self.decoded_values[fch])
110
+
111
def get_key_lists(self) -> list[list[int | str]]:
    """Group the available fch_ids by identical buffer-relevant settings.

    Channels whose ``sample_length``, ``avg_sample_length`` and
    ``avg_mode`` all agree can share one raw buffer, so they are returned
    together in the same inner list.
    """
    if self.channel_configs is None:
        raise RuntimeError(
            "Identification of key lists requires channel configs to be set!"
        )

    params_for_equality = ["sample_length", "avg_sample_length", "avg_mode"]

    key_lists: list[list[int]] = []
    for fch_id, config in self.channel_configs.items():
        # the first entry of each group serves as its "archetype"
        group = next(
            (
                kl
                for kl in key_lists
                if check_dict_spec_equal(
                    config, self.channel_configs[kl[0]], params_for_equality
                )
            ),
            None,
        )
        if group is None:
            key_lists.append([fch_id])
        else:
            group.append(fch_id)
    log.debug(f"key lists are: {repr(key_lists)}")
    return key_lists
138
+
139
# copied from ORCA SIS3316
def get_decoded_values(self, key: int = None) -> dict[str, Any]:
    """Return the decoded-values specification registered for channel *key*.

    Parameters
    ----------
    key
        flat fch_id of the channel; must not be ``None``.

    Raises
    ------
    RuntimeError
        if *key* is ``None``.
    KeyError
        if no specification was registered for *key* (they are created in
        ``set_channel_configs``).
    """
    # NOTE: the original carried an unreachable "return first spec found"
    # fallback after this raise; the dead code was removed -- behavior
    # is unchanged.
    if key is None:
        raise RuntimeError("Key is None!")
    return self.decoded_values[key]
151
+
152
def decode_packet(
    self,
    packet: bytes,
    evt_rbkd: lgdo.Table | dict[int, lgdo.Table],
    packet_id: int,
    fch_id: int,
) -> bool:
    """Decode a single packet, i.e. a single SIS3316 event as specified in the Struck manual.

    A single packet corresponds to a single event and channel and has a
    unique timestamp; packets of different channel groups can vary in size.

    Parameters
    ----------
    packet
        the raw event bytes chopped out of the stream.
    evt_rbkd
        raw buffers keyed by fch_id; the entry for *fch_id* receives the event.
    packet_id
        running packet index in the file.
    fch_id
        flat FADC/channel id (read by the streamer before chopping, since
        the event size is needed to cut the event out of the stream).

    Returns
    -------
    bool
        ``True`` if the receiving buffer is full after storing this event.

    Raises
    ------
    RuntimeError
        on malformed packets (bad section markers, waveform size mismatch,
        or unsupported MAW test data).
    """
    # check if this fch_id should be recorded; count and skip otherwise
    if fch_id not in evt_rbkd:
        if fch_id not in self.skipped_channels:
            self.skipped_channels[fch_id] = 0
            log.info(f"Skipping channel: {fch_id}")
            log.debug(f"evt_rbkd: {evt_rbkd.keys()}")
        self.skipped_channels[fch_id] += 1
        return False

    tbl = evt_rbkd[fch_id].lgdo
    ii = evt_rbkd[fch_id].loc

    # view the raw event data as arrays of 16- and 32-bit uints
    evt_data_32 = np.frombuffer(packet, dtype=np.uint32)
    evt_data_16 = np.frombuffer(packet, dtype=np.uint16)

    # fch_id = (evt_data_32[0] >> 4) & 0x00000fff --> read earlier by the
    # streamer, since the size is needed to chop the event from the stream.
    # 48-bit timestamp: upper 16 bits live in header word 0 (bits 31:16),
    # lower 32 bits in word 1. Do the reconstruction in Python ints:
    # shifting a numpy uint32 left by 16 wraps within 32 bits and would
    # silently drop the upper timestamp bits.
    timestamp = ((int(evt_data_32[0]) & 0xFFFF0000) << 16) + int(evt_data_32[1])
    format_bits = (evt_data_32[0]) & 0x0000000F
    tbl["fch_id"].nda[ii] = fch_id
    tbl["packet_id"].nda[ii] = packet_id
    tbl["timestamp"].nda[ii] = timestamp
    offset = 2
    # format bit 0: peak info + accumulator sums 1-6
    if format_bits & 0x1:
        tbl["peakHighValue"].nda[ii] = evt_data_16[4]
        tbl["peakHighIndex"].nda[ii] = evt_data_16[5]
        tbl["information"].nda[ii] = (evt_data_32[offset + 1] >> 24) & 0xFF
        tbl["accSum1"].nda[ii] = evt_data_32[offset + 2]
        tbl["accSum2"].nda[ii] = evt_data_32[offset + 3]
        tbl["accSum3"].nda[ii] = evt_data_32[offset + 4]
        tbl["accSum4"].nda[ii] = evt_data_32[offset + 5]
        tbl["accSum5"].nda[ii] = evt_data_32[offset + 6]
        tbl["accSum6"].nda[ii] = evt_data_32[offset + 7]
        offset += 7
    # format bit 1: accumulator sums 7-8
    if format_bits & 0x2:
        tbl["accSum7"].nda[ii] = evt_data_32[offset + 0]
        tbl["accSum8"].nda[ii] = evt_data_32[offset + 1]
        offset += 2
    # format bit 2: MAW values
    if format_bits & 0x4:
        tbl["mawMax"].nda[ii] = evt_data_32[offset + 0]
        tbl["mawBefore"].nda[ii] = evt_data_32[offset + 1]
        tbl["mawAfter"].nda[ii] = evt_data_32[offset + 2]
        offset += 3
    # format bit 3: start/max energy
    if format_bits & 0x8:
        tbl["startEnergy"].nda[ii] = evt_data_32[offset + 0]
        tbl["maxEnergy"].nda[ii] = evt_data_32[offset + 1]
        offset += 2

    raw_length_32 = (evt_data_32[offset + 0]) & 0x03FFFFFF
    tbl["status_flag"].nda[ii] = (
        (evt_data_32[offset + 0]) & 0x04000000
    ) >> 26  # bit 26
    maw_test_flag = ((evt_data_32[offset + 0]) & 0x08000000) >> 27  # bit 27
    # top nibble marks whether an averaged-waveform section follows
    avg_data_coming = False
    if evt_data_32[offset + 0] & 0xF0000000 == 0xE0000000:
        avg_data_coming = False
    elif evt_data_32[offset + 0] & 0xF0000000 == 0xA0000000:
        avg_data_coming = True
    else:
        raise RuntimeError("Data corruption 1!")
    offset += 1
    avg_length_32 = 0
    if avg_data_coming:
        avg_count_status = (
            evt_data_32[offset + 0] & 0x00FF0000
        ) >> 16  # bits 23 - 16
        avg_length_32 = evt_data_32[offset + 0] & 0x0000FFFF
        if evt_data_32[offset + 0] & 0xF0000000 != 0xE0000000:
            raise RuntimeError("Data corruption 2!")
        offset += 1

    # --- now the offset points to the raw wf data ---

    if maw_test_flag:
        raise RuntimeError("Cannot handle data with MAW test data!")

    # compute expected and actual array dimensions
    raw_length_16 = 2 * raw_length_32
    avg_length_16 = 2 * avg_length_32
    header_length_16 = offset * 2
    expected_wf_length = len(evt_data_16) - header_length_16

    # error check: waveform size must match expectations
    if raw_length_16 + avg_length_16 != expected_wf_length:
        raise RuntimeError(
            f"Waveform sizes {raw_length_16} (raw) and {avg_length_16} (avg) doesn't match expected size {expected_wf_length}."
        )

    # store waveform if available:
    if raw_length_16 > 0:
        tbl["waveform"]["values"].nda[ii] = evt_data_16[
            offset * 2 : offset * 2 + raw_length_16
        ]
        offset += raw_length_32
        tbl["waveform"]["t0"].nda[ii] = self.t0_raw[fch_id]

    # store pre-averaged (avg) waveform if available:
    if avg_length_16 > 0:
        tbl["avgwaveform"]["values"].nda[ii] = evt_data_16[
            offset * 2 : offset * 2 + avg_length_16
        ]
        offset += avg_length_32
        # update the avg waveform t0 with the per-event offset
        tbl["avgwaveform"]["t0"].nda[ii] = (
            self.t0_avg_const[fch_id]
            + float(avg_count_status) * self.dt_raw[fch_id]
        )

    # consistency check: we must have consumed exactly the whole packet
    if offset != len(evt_data_32):
        raise RuntimeError("I messed up...")

    evt_rbkd[fch_id].loc += 1

    return evt_rbkd[fch_id].is_full()
281
+
282
def __add_waveform(
    self,
    decoded_values_fch: dict[str, Any],
    is_avg: bool,
    max_samples: int,
    dt: float,
) -> None:
    """Register a waveform entry in a channel's decoded-values spec.

    Raw samples go under ``"waveform"`` (dt from the clock itself) and
    pre-averaged samples under ``"avgwaveform"`` (dt = clock dt times the
    averaging factor). Averaged samples are available from the 125 MHz
    (16 bit) variant of the SIS3316 and can be stored independently of
    the raw samples. GERDA used to have a low-frequency (waveform) and a
    high-frequency (aux waveform) trace; here LF = avgwaveform and
    HF = waveform.
    """
    key = "avgwaveform" if is_avg else "waveform"
    decoded_values_fch[key] = {
        "dtype": "uint16",
        "datatype": "waveform",
        # max value; override before initializing buffers to save RAM
        "wf_len": max_samples,
        # the sample pitch (inverse of clock speed); t0 cannot be set
        # here -- it is filled per event in decode_packet
        "dt": dt,
        "dt_units": "ns",
        "t0_units": "ns",
    }
305
+
306
def __add_accum1till6(self, decoded_values_fch: dict[str, Any]) -> None:
    """Register peak info and accumulator sums 1-6 (enabled by format bit 0)."""
    decoded_values_fch["peakHighValue"] = {"dtype": "uint32", "units": "adc"}
    decoded_values_fch["peakHighIndex"] = {"dtype": "uint32", "units": "adc"}
    decoded_values_fch["information"] = {"dtype": "uint32"}
    for acc_idx in range(1, 7):
        decoded_values_fch[f"accSum{acc_idx}"] = {"dtype": "uint32", "units": "adc"}
316
+
317
def __add_accum7and8(self, decoded_values_fch: dict[str, Any]) -> None:
    """Register accumulator sums 7 and 8 (enabled by format bit 1)."""
    for acc_idx in (7, 8):
        decoded_values_fch[f"accSum{acc_idx}"] = {"dtype": "uint32", "units": "adc"}
320
+
321
def __add_maw(self, decoded_values_fch: dict[str, Any]) -> None:
    """Register moving-average-window (MAW) values (enabled by format bit 2)."""
    for field in ("mawMax", "mawBefore", "mawAfter"):
        decoded_values_fch[field] = {"dtype": "uint32", "units": "adc"}
325
+
326
def __add_energy(self, decoded_values_fch: dict[str, Any]) -> None:
    """Register start/max energy values (enabled by format bit 3)."""
    for field in ("startEnergy", "maxEnergy"):
        decoded_values_fch[field] = {"dtype": "uint32", "units": "adc"}
@@ -0,0 +1,149 @@
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import logging
5
+ from typing import Any, Dict
6
+
7
+ import lgdo
8
+ import numpy as np
9
+
10
+ from ..data_decoder import DataDecoder
11
+ from .llama_base import join_fadcid_chid
12
+
13
+ log = logging.getLogger(__name__)
14
+
15
+ LLAMA_Channel_Configs_t = Dict[int, Dict[str, Any]]
16
+
17
+
18
+ class LLAMAHeaderDecoder(DataDecoder): # DataDecoder currently unused
19
+ """
20
+ Decode llamaDAQ header data. Includes the file header as well as all available ("open") channel configurations.
21
+ """
22
+
23
@staticmethod
def magic_bytes() -> int:
    """Magic number marking a llamaDAQ file (0x4972414C = b"LArI" read little-endian)."""
    return 0x4972414C
26
+
27
def __init__(self, *args, **kwargs) -> None:
    """Create the header decoder with an empty config struct."""
    super().__init__(*args, **kwargs)
    # LGDO struct collecting the decoded header fields; filled by decode_header()
    self.config = lgdo.Struct()
    # per-fch_id dict of channel settings; filled by __decode_channel_configs()
    self.channel_configs = None
31
+
32
def decode_header(self, f_in: io.BufferedReader) -> tuple[lgdo.Struct, int]:
    """Decode the llamaDAQ file header at the start of *f_in*.

    Reads the 16-byte file header (magic bytes, version numbers, channel
    config size and count) followed by all channel configurations, and
    assembles everything into ``self.config``.

    Returns
    -------
    tuple
        the filled config struct and the number of bytes read from the
        stream. (Return annotation corrected: the method returns a
        2-tuple, not just the struct.)

    Raises
    ------
    RuntimeError
        if the magic bytes do not match llamaDAQ-SIS3316.
    """
    n_bytes_read = 0

    f_in.seek(0)  # should be there anyhow, but re-set if not
    header = f_in.read(16)  # read 16 bytes
    n_bytes_read += 16
    evt_data_32 = np.frombuffer(header, dtype=np.uint32)
    evt_data_16 = np.frombuffer(header, dtype=np.uint16)

    # line0: magic bytes
    magic = evt_data_32[0]
    # print(hex(magic))
    if magic == self.magic_bytes():
        log.info("Read in file as llamaDAQ-SIS3316, magic bytes correct.")
    else:
        log.error("Magic bytes not matching for llamaDAQ file!")
        raise RuntimeError("wrong file type")

    # 16-bit words 4/3/2 hold major/minor/patch; word 5 the size of one
    # channel config block; 32-bit word 3 the number of open channels
    self.version_major = evt_data_16[4]
    self.version_minor = evt_data_16[3]
    self.version_patch = evt_data_16[2]
    self.length_econf = evt_data_16[5]
    self.number_chOpen = evt_data_32[3]

    log.debug(
        f"File version: {self.version_major}.{self.version_minor}.{self.version_patch}"
    )
    log.debug(
        f"{self.number_chOpen} channels open, each config {self.length_econf} bytes long"
    )

    n_bytes_read += self.__decode_channel_configs(f_in)

    # print(self.channel_configs[0]["MAW3_offset"])

    # assemble LGDO struct:
    self.config.add_field("version_major", lgdo.Scalar(self.version_major))
    self.config.add_field("version_minor", lgdo.Scalar(self.version_minor))
    self.config.add_field("version_patch", lgdo.Scalar(self.version_patch))
    self.config.add_field("length_econf", lgdo.Scalar(self.length_econf))
    self.config.add_field("number_chOpen", lgdo.Scalar(self.number_chOpen))

    # one sub-struct per open channel, keyed "fch_XX"
    for fch_id, fch_content in self.channel_configs.items():
        fch_lgdo = lgdo.Struct()
        for key, value in fch_content.items():
            fch_lgdo.add_field(key, lgdo.Scalar(value))
        self.config.add_field(f"fch_{fch_id:02d}", fch_lgdo)

    return self.config, n_bytes_read
81
+
82
# override from DataDecoder
def make_lgdo(self, key: int = None, size: int = None) -> lgdo.Struct:
    """Return the already-assembled header config struct.

    *key* and *size* are accepted for interface compatibility but ignored.
    """
    return self.config
85
+
86
def get_channel_configs(self) -> LLAMA_Channel_Configs_t:
    """Return the per-fch_id channel configurations (None until the header was decoded)."""
    return self.channel_configs
88
+
89
def __decode_channel_configs(self, f_in: io.BufferedReader) -> int:
    """Read the channel-configuration section directly after the file header.

    Creates a dict of metadata for each FADC/channel combination in
    ``self.channel_configs``. FADC-ID and channel-ID are combined into a
    single flat id: ``(fadcid << 4) + chid``.

    Returns the number of bytes read.

    Raises
    ------
    RuntimeError
        if the per-channel config block is not the expected 88 bytes, or
        if the same FADC/channel combination appears twice.
    """
    # f_in.seek(16) #should be after file header anyhow, but re-set if not
    n_bytes_read = 0
    self.channel_configs = {}

    # each config block must be exactly 88 bytes in this format version
    if self.length_econf != 88:
        raise RuntimeError("Invalid channel configuration format")

    for _i in range(0, self.number_chOpen):
        # print("reading in channel config {}".format(i))

        channel = f_in.read(self.length_econf)
        n_bytes_read += self.length_econf
        # bytes 16..31 hold two 64-bit floats: sample_freq and gain
        ch_dpf = channel[16:32]
        evt_data_32 = np.frombuffer(channel, dtype=np.uint32)
        evt_data_dpf = np.frombuffer(ch_dpf, dtype=np.float64)

        fadc_index = evt_data_32[0]
        channel_index = evt_data_32[1]
        fch_id = join_fadcid_chid(fadc_index, channel_index)

        if fch_id in self.channel_configs:
            raise RuntimeError(
                f"duplicate channel configuration in file: FADCID: {fadc_index}, ChannelID: {channel_index}"
            )
        else:
            self.channel_configs[fch_id] = {}

        # word 2: bit 0 = 14-bit flag, bit 1 = channel-open flag
        self.channel_configs[fch_id]["14BitFlag"] = evt_data_32[2] & 0x00000001
        if evt_data_32[2] & 0x00000002 == 0:
            log.warning("Channel in configuration marked as non-open!")
        self.channel_configs[fch_id]["ADC_offset"] = evt_data_32[3]
        self.channel_configs[fch_id]["sample_freq"] = evt_data_dpf[
            0
        ]  # 64 bit float
        self.channel_configs[fch_id]["gain"] = evt_data_dpf[1]
        self.channel_configs[fch_id]["format_bits"] = evt_data_32[8]
        self.channel_configs[fch_id]["sample_start_index"] = evt_data_32[9]
        self.channel_configs[fch_id]["sample_pretrigger"] = evt_data_32[10]
        self.channel_configs[fch_id]["avg_sample_pretrigger"] = evt_data_32[11]
        self.channel_configs[fch_id]["avg_mode"] = evt_data_32[12]
        self.channel_configs[fch_id]["sample_length"] = evt_data_32[13]
        self.channel_configs[fch_id]["avg_sample_length"] = evt_data_32[14]
        self.channel_configs[fch_id]["MAW_buffer_length"] = evt_data_32[15]
        self.channel_configs[fch_id]["event_length"] = evt_data_32[16]
        self.channel_configs[fch_id]["event_header_length"] = evt_data_32[17]
        self.channel_configs[fch_id]["accum6_offset"] = evt_data_32[18]
        self.channel_configs[fch_id]["accum2_offset"] = evt_data_32[19]
        self.channel_configs[fch_id]["MAW3_offset"] = evt_data_32[20]
        self.channel_configs[fch_id]["energy_offset"] = evt_data_32[21]

    return n_bytes_read