ctao-calibpipe 0.1.0rc9__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ctao-calibpipe might be problematic. Click here for more details.

Files changed (24) hide show
  1. calibpipe/_version.py +2 -2
  2. calibpipe/core/common_metadata_containers.py +3 -0
  3. calibpipe/database/adapter/adapter.py +1 -1
  4. calibpipe/database/adapter/database_containers/__init__.py +2 -0
  5. calibpipe/database/adapter/database_containers/common_metadata.py +2 -0
  6. calibpipe/database/adapter/database_containers/throughput.py +30 -0
  7. calibpipe/database/interfaces/table_handler.py +79 -97
  8. calibpipe/telescope/throughput/containers.py +59 -0
  9. calibpipe/tests/unittests/array/test_cross_calibration.py +417 -0
  10. calibpipe/tests/unittests/database/test_table_handler.py +95 -0
  11. calibpipe/tests/unittests/telescope/camera/test_calculate_camcalib_coefficients.py +347 -0
  12. calibpipe/tests/unittests/telescope/camera/test_produce_camcalib_test_data.py +42 -0
  13. calibpipe/tests/unittests/telescope/throughput/test_muon_throughput_calibrator.py +189 -0
  14. calibpipe/tools/camcalib_test_data.py +361 -0
  15. calibpipe/tools/camera_calibrator.py +558 -0
  16. calibpipe/tools/muon_throughput_calculator.py +239 -0
  17. calibpipe/tools/telescope_cross_calibration_calculator.py +721 -0
  18. {ctao_calibpipe-0.1.0rc9.dist-info → ctao_calibpipe-0.2.0.dist-info}/METADATA +3 -2
  19. {ctao_calibpipe-0.1.0rc9.dist-info → ctao_calibpipe-0.2.0.dist-info}/RECORD +24 -14
  20. {ctao_calibpipe-0.1.0rc9.dist-info → ctao_calibpipe-0.2.0.dist-info}/WHEEL +1 -1
  21. {ctao_calibpipe-0.1.0rc9.dist-info → ctao_calibpipe-0.2.0.dist-info}/entry_points.txt +4 -0
  22. {ctao_calibpipe-0.1.0rc9.dist-info → ctao_calibpipe-0.2.0.dist-info}/licenses/AUTHORS.md +0 -0
  23. {ctao_calibpipe-0.1.0rc9.dist-info → ctao_calibpipe-0.2.0.dist-info}/licenses/LICENSE +0 -0
  24. {ctao_calibpipe-0.1.0rc9.dist-info → ctao_calibpipe-0.2.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,347 @@
1
#!/usr/bin/env python3
"""
Test calibpipe-calculate-camcalib-coefficients tool
"""

from pathlib import Path

import numpy as np
import pytest
import yaml
from astropy import units as u
from astropy.time import Time
from calibpipe.tools.camera_calibrator import CameraCalibratorTool
from ctapipe.core import run_tool
from ctapipe.instrument import SubarrayDescription
from ctapipe.io import read_table, write_table
from ctapipe.tools.calculate_pixel_stats import PixelStatisticsCalculatorTool
from ctapipe.tools.process import ProcessorTool
from traitlets.config.loader import Config

# Path to the example configuration files shipped with the documentation
CONFIG_PATH = Path(__file__).parent.joinpath(
    "../../../../../../docs/source/examples/telescope/camera/configuration/"
)
# Path to the calibration event files used as test input
DATA_PATH = Path(__file__).parent.joinpath("../../../data/telescope/camera/")
# The telescope ID to be used in the tests
TEL_ID = 1
# Group in the DL1 monitoring file below which per-telescope tables live
MONITORING_TEL_GROUP = "/dl1/monitoring/telescope/"
# The monitoring groups to be used in the tests
MONITORING_GROUPS = ["pedestal_image", "flatfield_image", "flatfield_peak_time"]
# Define reference time and trigger rate for the tests. These values
# are used to create realistic timestamps for the aggregated chunks.
REFERENCE_TIME = Time.now()
REFERENCE_TRIGGER_RATE = 1000.0 * u.Hz
# Extra event-source CLI options: keep the calibration events and skip the
# R1 calibration so the uncalibrated samples reach the aggregation step.
SIM_ARGV = [
    "--SimTelEventSource.skip_calibration_events=False",
    "--SimTelEventSource.skip_r1_calibration=True",
]
# Simulated values for the tests for the different gain channels
# HG: High Gain, LG: Low Gain
EXPECTED_ADC_OFFSET = {"HG": 400, "LG": 400}
EXPECTED_DC2PE = {"HG": 0.015, "LG": 0.25}
EXPECTED_TIME_SHIFT = {"HG": 0.0, "LG": 0.0}
# Set different file prefixes and tolerances for the two statistic modes
# - 'low_stats': 100 events per calibration type processed from simtel files to the final camcalib
#   coefficients to ensure that the camcalib coefficients calculation works from DL0.
#   Due to the low number of events, the tolerance for the coefficients is relatively high,
#   since the statistics are not sufficient to calculate the coefficients with high precision.
# - 'high_stats': 25000 events per calibration type already aggregated from simtel files via
#   the 'CamCalibTestDataTool' and retrieved from MinIO. The final camcalib coefficients
#   are calculated from the aggregated statistics files to test the correctness
#   of the camcalib coefficients calculation within a restrictive tolerance.
FILE_PREFIX = {"low_stats": "statsagg_", "high_stats": "calibpipe_v0.2.0_statsagg_"}
DC2PE_TOLERANCE = {
    "low_stats": {"rtol": 0.25, "atol": 0.0},
    "high_stats": {"rtol": 0.02, "atol": 0.0},
}
ADC_OFFSET_TOLERANCE = {
    "low_stats": {"rtol": 0.0, "atol": 10.0},
    "high_stats": {"rtol": 0.0, "atol": 2.0},
}
TIME_SHIFT_TOLERANCE = {
    "low_stats": {"rtol": 0.0, "atol": 0.25},
    "high_stats": {"rtol": 0.0, "atol": 0.25},
}
69
+
70
+
71
@pytest.mark.order(1)
def test_produce_dl1_image_file():
    """
    Produce DL1A file containing the images of the calibration events.
    """
    for calib_kind in ("pedestal", "flatfield"):
        # Input simtel file and target DL1 image file for this calibration kind
        source_file = DATA_PATH.joinpath(f"{calib_kind}_LST_dark.simtel.gz")
        dl1_file = DATA_PATH.joinpath(f"{calib_kind}_events.dl1.h5")
        # Load the matching ctapipe-process configuration
        config_file = CONFIG_PATH.joinpath(f"ctapipe_process_{calib_kind}.yaml")
        with open(config_file) as stream:
            tool_config = yaml.safe_load(stream)
        # Assemble the command line, including the simulation-specific options
        cli_args = [
            f"--input={source_file}",
            f"--output={dl1_file}",
            "--overwrite",
        ]
        cli_args.extend(SIM_ARGV)
        # The ProcessorTool must exit cleanly (return code 0)
        exit_code = run_tool(
            ProcessorTool(config=Config(tool_config)),
            argv=cli_args,
            cwd=DATA_PATH,
        )
        assert exit_code == 0
99
+
100
+
101
@pytest.mark.order(2)
@pytest.mark.verifies_usecase("UC-120-2.21")
@pytest.mark.parametrize(
    "aggregation_mode",
    ["single_chunk", "same_chunks", "different_chunks"],
)
def test_stats_aggregation(aggregation_mode):
    """
    DL1 camera monitoring file containing the statistics aggregation for a given chunk mode.
    """
    # Set the output file path for the statistics aggregation
    output_file = DATA_PATH.joinpath(f"statsagg_{aggregation_mode}.dl1.h5")
    # Loop over the monitoring groups and calculate pixel statistics
    for mon_group in MONITORING_GROUPS:
        # Pedestal statistics come from the pedestal DL1 file,
        # both flatfield groups from the flatfield DL1 file.
        dl1_image_file = (
            DATA_PATH.joinpath("pedestal_events.dl1.h5")
            if mon_group == "pedestal_image"
            else DATA_PATH.joinpath("flatfield_events.dl1.h5")
        )
        # Get the standard configuration for the PixelStatisticsCalculator
        with open(
            CONFIG_PATH.joinpath(f"ctapipe_calculate_pixel_stats_{mon_group}.yaml")
        ) as yaml_file:
            pix_stats_config = yaml.safe_load(yaml_file)
        # Set some additional parameters using cli arguments
        cli_argv = [
            f"--input_url={dl1_image_file}",
            f"--output_path={output_file}",
        ]
        # Number of image rows for this telescope drives the chunk sizes below
        n_events = len(
            read_table(
                dl1_image_file,
                path=f"/dl1/event/telescope/images/tel_{TEL_ID:03d}",
            )
        )
        # Modify the configuration for the specific chunk mode
        if aggregation_mode == "single_chunk":
            chunk_duration = 1000.0 * u.s
            # Use a single chunk size for all monitoring groups;
            # overwrite the chunk size for the specific aggregator.
            if mon_group == "flatfield_peak_time":
                cli_argv.append(f"--PlainAggregator.chunk_size={n_events}")
            else:
                cli_argv.append(f"--SigmaClippingAggregator.chunk_size={n_events}")
        elif aggregation_mode == "same_chunks":
            chunk_duration = 100.0 * u.s
            # Overwrite the chunk size for the specific aggregators to have ten chunks
            if mon_group == "flatfield_peak_time":
                cli_argv.append(f"--PlainAggregator.chunk_size={n_events//10}")
            else:
                cli_argv.append(
                    f"--SigmaClippingAggregator.chunk_size={n_events//10}"
                )
        elif aggregation_mode == "different_chunks":
            # Use different chunk sizes (and durations) for each monitoring group
            if mon_group == "pedestal_image":
                chunk_duration = 200.0 * u.s
                cli_argv.append(
                    f"--SigmaClippingAggregator.chunk_size={2 * (n_events//10)}"
                )
            elif mon_group == "flatfield_image":
                chunk_duration = 100.0 * u.s
                cli_argv.append(
                    f"--SigmaClippingAggregator.chunk_size={n_events//10}"
                )
            elif mon_group == "flatfield_peak_time":
                chunk_duration = 500.0 * u.s
                cli_argv.append(
                    f"--PlainAggregator.chunk_size={5 * (n_events//10)}"
                )

        # Run the PixelStatisticsCalculatorTool to calculate pixel statistics
        assert (
            run_tool(
                PixelStatisticsCalculatorTool(config=Config(pix_stats_config)),
                argv=cli_argv,
                cwd=DATA_PATH,
                raises=True,
            )
            == 0
        )
        # Overwrite timestamps in the output file to make them realistic.
        # Read the created statsagg table for the specific monitoring group.
        stats_aggregation_tab = read_table(
            output_file,
            path=f"{MONITORING_TEL_GROUP}{mon_group}/tel_{TEL_ID:03d}",
        )
        # Loop over the chunks and set the new timestamps, spaced by the
        # chunk duration and offset by one trigger period from REFERENCE_TIME.
        for chunk_nr in range(len(stats_aggregation_tab)):
            stats_aggregation_tab["time_start"][chunk_nr] = (
                REFERENCE_TIME
                + (1 / REFERENCE_TRIGGER_RATE).to(u.s)
                + chunk_nr * chunk_duration
            )
            stats_aggregation_tab["time_end"][chunk_nr] = (
                REFERENCE_TIME + (chunk_nr + 1) * chunk_duration
            )
        # Set a different starting time (outside the default 1 second tolerance)
        # for the pedestal group if the mode is 'different_chunks'. This is to ensure
        # that we can later test when the chunk interpolator is returning NaN values
        # for the first and last unique timestamps.
        if aggregation_mode == "different_chunks":
            if mon_group == "pedestal_image":
                stats_aggregation_tab["time_start"][0] -= 2 * u.s
                stats_aggregation_tab["time_end"][-1] += 2 * u.s
        # Overwrite the table in the output file
        write_table(
            stats_aggregation_tab,
            output_file,
            f"{MONITORING_TEL_GROUP}{mon_group}/tel_{TEL_ID:03d}",
            overwrite=True,
        )
214
+
215
+
216
# We are ignoring the warning about NaN slices, since we expect all values to be
# NaN for the first and last timestamps in the 'different_chunks' mode.
@pytest.mark.order(3)
@pytest.mark.verifies_usecase("UC-120-2.20")
@pytest.mark.parametrize(
    "statistic_mode",
    ["low_stats", "high_stats"],
)
@pytest.mark.filterwarnings("ignore:All-NaN slice encountered")
def test_calculate_camcalib_coeffs_tool(statistic_mode):
    """check camcalib coefficients calculation from dl1 camera monitoring data files"""
    # There are three different aggregation modes:
    # - single_chunk: all monitoring groups are aggregated in a single chunk
    # - same_chunks: all monitoring groups are aggregated in the same chunks
    # - different_chunks: each monitoring group is aggregated in different chunks
    for aggregation_mode in ["single_chunk", "same_chunks", "different_chunks"]:
        # Input file with aggregated statistics for this mode and statistics level
        stats_aggregation_file = DATA_PATH.joinpath(
            f"{FILE_PREFIX[statistic_mode]}{aggregation_mode}.dl1.h5"
        )
        # Run the tool with the configuration and the input file
        assert (
            run_tool(
                CameraCalibratorTool(),
                argv=[
                    f"--input_url={stats_aggregation_file}",
                    "--overwrite",
                ],
                cwd=DATA_PATH,
                raises=True,
            )
            == 0
        )
        # Read subarray description from the created monitoring file
        subarray = SubarrayDescription.from_hdf(stats_aggregation_file)
        # Check for the selected telescope
        assert subarray.tel_ids[0] == TEL_ID
        # Read the camera calibration coefficients from the created monitoring file
        # and check that the calculated values are as expected.
        camcalib_coeffs = read_table(
            stats_aggregation_file,
            path=f"{MONITORING_TEL_GROUP}camera_calibration/tel_{TEL_ID:03d}",
        )
        for i in range(len(camcalib_coeffs)):
            if aggregation_mode == "different_chunks":
                # For the 'different_chunks' mode, we expect the first factor and pedestal
                # to be NaN, since the first timestamp is not valid for the pedestal group.
                if i == 0 or i == len(camcalib_coeffs) - 1:
                    # Check that the factor and time shift are NaN for the first and last timestamps
                    assert np.isnan(camcalib_coeffs["factor"][i]).all()
                    assert np.isnan(camcalib_coeffs["time_shift"][i]).all()
                    # Check that the outlier mask is all True for the first and last timestamps
                    assert camcalib_coeffs["outlier_mask"][i].all()
                    # Check that the is_valid flag is False for the first and last timestamps
                    assert not camcalib_coeffs["is_valid"][i]
                    # Check that the pedestal offsets are not NaN since the first and last timestamps
                    # are valid for the pedestal group.
                    for g, gain_channel in enumerate(["HG", "LG"]):
                        np.testing.assert_allclose(
                            np.nanmedian(camcalib_coeffs["pedestal_offset"][i][g]),
                            EXPECTED_ADC_OFFSET[gain_channel],
                            rtol=ADC_OFFSET_TOLERANCE[statistic_mode]["rtol"],
                            atol=ADC_OFFSET_TOLERANCE[statistic_mode]["atol"],
                            err_msg=(
                                f"Pedestal per sample values do not match expected values within "
                                f"a tolerance of {int(ADC_OFFSET_TOLERANCE[statistic_mode]['atol'])} ADC counts"
                            ),
                        )
                    # Skip the regular per-chunk checks for these edge chunks
                    continue
            # Check that the median of the calculated factor is close to the
            # simtel_dc2pe values for the corresponding gain channel.
            for g, gain_channel in enumerate(["HG", "LG"]):
                np.testing.assert_allclose(
                    np.nanmedian(camcalib_coeffs["factor"][i][g]),
                    EXPECTED_DC2PE[gain_channel],
                    rtol=DC2PE_TOLERANCE[statistic_mode]["rtol"],
                    atol=DC2PE_TOLERANCE[statistic_mode]["atol"],
                    err_msg=(
                        f"Factor coefficients do not match expected values within "
                        f"a tolerance of {int(DC2PE_TOLERANCE[statistic_mode]['rtol']*100)}%"
                    ),
                )
                # Check that the median of the calculated pedestal offset is close to the
                # simtel_pedestal_per_sample values for the corresponding gain channel.
                np.testing.assert_allclose(
                    np.nanmedian(camcalib_coeffs["pedestal_offset"][i][g]),
                    EXPECTED_ADC_OFFSET[gain_channel],
                    rtol=ADC_OFFSET_TOLERANCE[statistic_mode]["rtol"],
                    atol=ADC_OFFSET_TOLERANCE[statistic_mode]["atol"],
                    err_msg=(
                        f"Pedestal per sample values do not match expected values within "
                        f"a tolerance of {int(ADC_OFFSET_TOLERANCE[statistic_mode]['atol'])} ADC counts"
                    ),
                )
                # Check that the median of the calculated time shift is close to the
                # simtel_time_shift values for the corresponding gain channel.
                np.testing.assert_allclose(
                    np.nanmedian(camcalib_coeffs["time_shift"][i][g]),
                    EXPECTED_TIME_SHIFT[gain_channel],
                    rtol=TIME_SHIFT_TOLERANCE[statistic_mode]["rtol"],
                    atol=TIME_SHIFT_TOLERANCE[statistic_mode]["atol"],
                    err_msg=(
                        "Time shift values do not match expected values "
                        "within a tolerance of a quarter of a waveform sample"
                    ),
                )
            # Check that the is_valid flag is True for all timestamps
            assert camcalib_coeffs["is_valid"][i]
324
+
325
+
326
def test_npe_std_outlier_detector():
    """check camcalib coefficients calculation with the NpeStdOutlierDetector"""
    # Restrict this test to the single_chunk aggregation mode
    input_file = DATA_PATH.joinpath("calibpipe_v0.2.0_statsagg_single_chunk.dl1.h5")
    # Load the NpeStdOutlierDetector configuration from its YAML file
    config_file = CONFIG_PATH.joinpath("npe_std_outlier_detector.yaml")
    with open(config_file) as stream:
        detector_config = yaml.safe_load(stream)
    # The CameraCalibratorTool must finish successfully with the
    # outlier detector enabled (return code 0)
    exit_code = run_tool(
        CameraCalibratorTool(config=Config(detector_config)),
        argv=[
            f"--input_url={input_file}",
            "--overwrite",
        ],
        cwd=DATA_PATH,
        raises=True,
    )
    assert exit_code == 0
@@ -0,0 +1,42 @@
1
#!/usr/bin/env python3
"""
Test calibpipe-produce-camcalib-test-data tool
"""

from pathlib import Path

from calibpipe.tools.camcalib_test_data import CamCalibTestDataTool
from ctapipe.core import run_tool

# Path to the example configuration files shipped with the documentation
CONFIG_PATH = Path(__file__).parent.joinpath(
    "../../../../../../docs/source/examples/telescope/camera/configuration/"
)
# Path to the simtel calibration event files used as test input
DATA_PATH = Path(__file__).parent.joinpath("../../../data/telescope/camera/")
17
+
18
+
19
def test_produce_camcalib_test_data():
    """Test the calibpipe-produce-camcalib-test-data tool"""
    # Input simtel files for the two calibration event types
    pedestal_file = DATA_PATH.joinpath("pedestal_LST_dark.simtel.gz")
    flatfield_file = DATA_PATH.joinpath("flatfield_LST_dark.simtel.gz")
    # Map every tool trait to its value; insertion order matches the CLI order
    tool_options = {
        "pedestal_input_url": pedestal_file,
        "flatfield_input_url": flatfield_file,
        "output_dir": DATA_PATH,
        "process_pedestal_config": CONFIG_PATH.joinpath(
            "ctapipe_process_pedestal.yaml"
        ),
        "process_flatfield_config": CONFIG_PATH.joinpath(
            "ctapipe_process_flatfield.yaml"
        ),
        "agg_stats_pedestal_image_config": CONFIG_PATH.joinpath(
            "ctapipe_calculate_pixel_stats_pedestal_image.yaml"
        ),
        "agg_stats_flatfield_image_config": CONFIG_PATH.joinpath(
            "ctapipe_calculate_pixel_stats_flatfield_image.yaml"
        ),
        "agg_stats_flatfield_peak_time_config": CONFIG_PATH.joinpath(
            "ctapipe_calculate_pixel_stats_flatfield_peak_time.yaml"
        ),
    }
    # Render the traits as command-line arguments for the tool
    cli_args = [
        f"--CamCalibTestDataTool.{trait}={value}"
        for trait, value in tool_options.items()
    ]
    # The tool must exit cleanly (return code 0)
    exit_code = run_tool(
        CamCalibTestDataTool(),
        argv=cli_args,
        cwd=DATA_PATH,
        raises=True,
    )
    assert exit_code == 0
@@ -0,0 +1,189 @@
1
+ from datetime import datetime
2
+ from pathlib import Path
3
+
4
+ import numpy as np
5
+ import pytest
6
+ import yaml
7
+ from astropy.table import QTable
8
+ from astropy.time import Time
9
+ from calibpipe.database.connections import CalibPipeDatabase
10
+ from calibpipe.database.interfaces import TableHandler
11
+ from calibpipe.telescope.throughput.containers import (
12
+ OpticalThoughtputContainer,
13
+ )
14
+ from calibpipe.tools.muon_throughput_calculator import CalculateThroughputWithMuons
15
+ from traitlets.config.loader import Config
16
+
17
+
18
class TestMuonThroughputCalibration:
    """Unit tests for the CalculateThroughputWithMuons tool.

    Covers the tool's behavior on an empty muon table, on real LST muon
    data, the upload of throughput results to the calibration database,
    and the ECSV output table written by ``finish()``.
    """

    # Example configuration shipped with the documentation
    config_path = Path(__file__).parent.joinpath(
        "../../../../../../docs/source/examples/telescope/throughput/configuration/"
    )
    db_config_path = Path(__file__).parent.joinpath(
        "../../../../../../docs/source/examples/utils/configuration/"
    )
    data_path = Path(__file__).parent.joinpath("../../../data/telescope/throughput/")

    # Base tool configuration, loaded once at class-creation time; the
    # output table is redirected into the test data directory.
    with open(config_path.joinpath("throughput_muon_configuration.yaml")) as yaml_file:
        data = yaml.safe_load(yaml_file)
        data["CalculateThroughputWithMuons"]["output_url"] = data_path.joinpath(
            "OpticalThroughput.ecsv"
        )

    input_empty_muon_table = data_path.joinpath("empty_muon_table.h5")
    input_good_muon_table_lst = data_path.joinpath("lst_muon_table.h5")

    def _configured_tool(self, input_url):
        """Return a CalculateThroughputWithMuons tool configured for *input_url*.

        Mutates the shared ``data`` configuration exactly as the tests did
        inline before, so behavior is unchanged.
        """
        self.data["CalculateThroughputWithMuons"]["input_url"] = input_url
        return CalculateThroughputWithMuons(config=Config(self.data))

    @staticmethod
    def _reference_container():
        """Build the fixed reference container used by the DB/table tests."""
        return OpticalThoughtputContainer(
            optical_throughput_coefficient=1,
            optical_throughput_coefficient_std=0.1,
            method="Muon Rings",
            validity_start=datetime(1970, 1, 1, 17, 49, 37, 629896),
            validity_end=datetime(1970, 1, 1, 17, 49, 37, 630035),
            obs_id=101,
            tel_id=1,
            n_events=1,
        )

    @pytest.mark.muon()
    def test_empty_data(self):
        """An empty muon table yields NaN coefficients and method 'None'."""
        tool = self._configured_tool(self.input_empty_muon_table)

        tool.setup()
        tool.start()

        result = tool.throughput_containers

        assert np.isnan(result[1]["optical_throughput_coefficient"])
        assert np.isnan(result[1]["optical_throughput_coefficient_std"])
        assert result[1]["method"] == "None"

    @pytest.mark.muon()
    def test_muon_data(self):
        """Real LST muon data produces the expected throughput coefficients."""
        tool = self._configured_tool(self.input_good_muon_table_lst)

        tool.setup()
        tool.start()

        containers = tool.throughput_containers

        assert containers is not None

        # Reference values obtained from a verified run on the same input
        expected_values = {
            "optical_throughput_coefficient": 0.19140317564869455,
            "optical_throughput_coefficient_std": 0.00835591540715762,
            "method": "Muon Rings",
            "validity_start": datetime(2024, 9, 24, 15, 6, 25, 976202),
            "validity_end": datetime(2024, 9, 24, 15, 6, 38, 1943),
            "obs_id": 101,
            "tel_id": 1,
            "n_events": 3,
        }

        assert containers[1].optical_throughput_coefficient == pytest.approx(
            expected_values["optical_throughput_coefficient"], rel=1e-6
        )
        assert containers[1].optical_throughput_coefficient_std == pytest.approx(
            expected_values["optical_throughput_coefficient_std"], rel=1e-6
        )

        assert containers[1].method == expected_values["method"]
        assert containers[1].validity_start == expected_values["validity_start"]
        assert containers[1].validity_end == expected_values["validity_end"]
        assert containers[1].obs_id == expected_values["obs_id"]
        assert containers[1].tel_id == expected_values["tel_id"]
        assert containers[1].n_events == expected_values["n_events"]

    @pytest.mark.muon()
    @pytest.mark.db()
    def test_upload_muon_data_db(self):
        """The container written by finish() can be read back from the database."""
        tool = self._configured_tool(self.input_good_muon_table_lst)
        tool.setup()

        # Replace the computed container with a fixed reference one so the
        # round-trip through the database is deterministic.
        tool.throughput_containers[1] = self._reference_container()
        tool.finish()

        with CalibPipeDatabase(
            **self.data["database_configuration"],
        ) as connection:
            # Pass the container class directly; the previous
            # type(OpticalThoughtputContainer()) built a throwaway instance
            # only to recover its class.
            qtable = TableHandler.read_table_from_database(
                OpticalThoughtputContainer, connection
            )

            assert qtable is not None
            uploaded_container = tool.throughput_containers[1]
            assert qtable[-1]["tel_id"] == uploaded_container["tel_id"]
            assert qtable[-1]["obs_id"] == uploaded_container["obs_id"]
            assert qtable[-1]["method"] == uploaded_container["method"]
            assert (
                qtable[-1]["optical_throughput_coefficient"]
                == uploaded_container["optical_throughput_coefficient"]
            )
            assert (
                qtable[-1]["optical_throughput_coefficient_std"]
                == uploaded_container["optical_throughput_coefficient_std"]
            )
            assert qtable[-1]["n_events"] == uploaded_container["n_events"]
            # Timestamps read back from the database must be timezone aware
            assert qtable[-1]["validity_start"].tzinfo is not None
            assert qtable[-1]["validity_end"].tzinfo is not None

    @pytest.mark.muon()
    @pytest.mark.db()
    def test_muon_table(self):
        """finish() writes a timezone-aware ECSV table with the expected values."""
        tool = self._configured_tool(self.input_good_muon_table_lst)
        tool.setup()
        tool.throughput_containers[1] = self._reference_container()
        tool.finish()

        table_path = tool.output_url
        written_table = QTable.read(table_path, format="ascii.ecsv")

        assert isinstance(
            written_table["validity_start"][0], Time
        ), "validity_start should be an Astropy Time object."
        assert isinstance(
            written_table["validity_end"][0], Time
        ), "validity_end should be an Astropy Time object."
        assert (
            written_table["validity_start"][0].scale == "utc"
        ), "validity_start should be timezone aware."
        assert (
            written_table["validity_end"][0].scale == "utc"
        ), "validity_end should be timezone aware."
        assert written_table["obs_id"][0] == 101
        assert written_table["tel_id"][0] == 1
        assert written_table["method"][0] == "Muon Rings"
        assert written_table["optical_throughput_coefficient"][0] == 1
        assert written_table["optical_throughput_coefficient_std"][0] == 0.1
        assert written_table["n_events"][0] == 1