ctao-calibpipe 0.3.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. calibpipe/__init__.py +5 -0
  2. calibpipe/_dev_version/__init__.py +9 -0
  3. calibpipe/_version.py +34 -0
  4. calibpipe/atmosphere/__init__.py +1 -0
  5. calibpipe/atmosphere/atmosphere_containers.py +109 -0
  6. calibpipe/atmosphere/meteo_data_handlers.py +485 -0
  7. calibpipe/atmosphere/models/README.md +14 -0
  8. calibpipe/atmosphere/models/__init__.py +1 -0
  9. calibpipe/atmosphere/models/macobac.ecsv +23 -0
  10. calibpipe/atmosphere/models/reference_MDPs/__init__.py +1 -0
  11. calibpipe/atmosphere/models/reference_MDPs/ref_density_at_15km_ctao-north_intermediate.ecsv +8 -0
  12. calibpipe/atmosphere/models/reference_MDPs/ref_density_at_15km_ctao-north_summer.ecsv +8 -0
  13. calibpipe/atmosphere/models/reference_MDPs/ref_density_at_15km_ctao-north_winter.ecsv +8 -0
  14. calibpipe/atmosphere/models/reference_MDPs/ref_density_at_15km_ctao-south_summer.ecsv +8 -0
  15. calibpipe/atmosphere/models/reference_MDPs/ref_density_at_15km_ctao-south_winter.ecsv +8 -0
  16. calibpipe/atmosphere/models/reference_atmospheres/__init__.py +1 -0
  17. calibpipe/atmosphere/models/reference_atmospheres/reference_atmo_model_v0_ctao-north_intermediate.ecsv +73 -0
  18. calibpipe/atmosphere/models/reference_atmospheres/reference_atmo_model_v0_ctao-north_summer.ecsv +73 -0
  19. calibpipe/atmosphere/models/reference_atmospheres/reference_atmo_model_v0_ctao-north_winter.ecsv +73 -0
  20. calibpipe/atmosphere/models/reference_atmospheres/reference_atmo_model_v0_ctao-south_summer.ecsv +73 -0
  21. calibpipe/atmosphere/models/reference_atmospheres/reference_atmo_model_v0_ctao-south_winter.ecsv +73 -0
  22. calibpipe/atmosphere/models/reference_rayleigh_scattering_profiles/__init__.py +1 -0
  23. calibpipe/atmosphere/models/reference_rayleigh_scattering_profiles/reference_rayleigh_extinction_profile_v0_ctao-north_intermediate.ecsv +857 -0
  24. calibpipe/atmosphere/models/reference_rayleigh_scattering_profiles/reference_rayleigh_extinction_profile_v0_ctao-north_summer.ecsv +857 -0
  25. calibpipe/atmosphere/models/reference_rayleigh_scattering_profiles/reference_rayleigh_extinction_profile_v0_ctao-north_winter.ecsv +857 -0
  26. calibpipe/atmosphere/models/reference_rayleigh_scattering_profiles/reference_rayleigh_extinction_profile_v0_ctao-south_summer.ecsv +857 -0
  27. calibpipe/atmosphere/models/reference_rayleigh_scattering_profiles/reference_rayleigh_extinction_profile_v0_ctao-south_winter.ecsv +857 -0
  28. calibpipe/atmosphere/templates/request_templates/__init__.py +1 -0
  29. calibpipe/atmosphere/templates/request_templates/copernicus.json +11 -0
  30. calibpipe/atmosphere/templates/request_templates/gdas.json +12 -0
  31. calibpipe/core/__init__.py +39 -0
  32. calibpipe/core/common_metadata_containers.py +198 -0
  33. calibpipe/core/exceptions.py +87 -0
  34. calibpipe/database/__init__.py +24 -0
  35. calibpipe/database/adapter/__init__.py +23 -0
  36. calibpipe/database/adapter/adapter.py +80 -0
  37. calibpipe/database/adapter/database_containers/__init__.py +63 -0
  38. calibpipe/database/adapter/database_containers/atmosphere.py +199 -0
  39. calibpipe/database/adapter/database_containers/common_metadata.py +150 -0
  40. calibpipe/database/adapter/database_containers/container_map.py +59 -0
  41. calibpipe/database/adapter/database_containers/observatory.py +61 -0
  42. calibpipe/database/adapter/database_containers/table_version_manager.py +39 -0
  43. calibpipe/database/adapter/database_containers/throughput.py +30 -0
  44. calibpipe/database/adapter/database_containers/version_control.py +17 -0
  45. calibpipe/database/connections/__init__.py +28 -0
  46. calibpipe/database/connections/calibpipe_database.py +60 -0
  47. calibpipe/database/connections/postgres_utils.py +97 -0
  48. calibpipe/database/connections/sql_connection.py +103 -0
  49. calibpipe/database/connections/user_confirmation.py +19 -0
  50. calibpipe/database/interfaces/__init__.py +71 -0
  51. calibpipe/database/interfaces/hashable_row_data.py +54 -0
  52. calibpipe/database/interfaces/queries.py +180 -0
  53. calibpipe/database/interfaces/sql_column_info.py +67 -0
  54. calibpipe/database/interfaces/sql_metadata.py +6 -0
  55. calibpipe/database/interfaces/sql_table_info.py +131 -0
  56. calibpipe/database/interfaces/table_handler.py +333 -0
  57. calibpipe/database/interfaces/types.py +96 -0
  58. calibpipe/telescope/throughput/containers.py +66 -0
  59. calibpipe/tests/conftest.py +274 -0
  60. calibpipe/tests/data/atmosphere/molecular_atmosphere/__init__.py +0 -0
  61. calibpipe/tests/data/atmosphere/molecular_atmosphere/contemporary_MDP.ecsv +34 -0
  62. calibpipe/tests/data/atmosphere/molecular_atmosphere/macobac.csv +852 -0
  63. calibpipe/tests/data/atmosphere/molecular_atmosphere/macobac.ecsv +23 -0
  64. calibpipe/tests/data/atmosphere/molecular_atmosphere/merged_file.ecsv +1082 -0
  65. calibpipe/tests/data/atmosphere/molecular_atmosphere/meteo_data_copernicus.ecsv +1082 -0
  66. calibpipe/tests/data/atmosphere/molecular_atmosphere/meteo_data_gdas.ecsv +66 -0
  67. calibpipe/tests/data/atmosphere/molecular_atmosphere/observatory_configurations.json +71 -0
  68. calibpipe/tests/data/utils/__init__.py +0 -0
  69. calibpipe/tests/data/utils/meteo_data_winter_and_summer.ecsv +12992 -0
  70. calibpipe/tests/test_conftest_data.py +200 -0
  71. calibpipe/tests/unittests/array/test_cross_calibration.py +412 -0
  72. calibpipe/tests/unittests/atmosphere/astral_testing.py +107 -0
  73. calibpipe/tests/unittests/atmosphere/test_meteo_data_handler.py +775 -0
  74. calibpipe/tests/unittests/atmosphere/test_molecular_atmosphere.py +327 -0
  75. calibpipe/tests/unittests/database/test_table_handler.py +163 -0
  76. calibpipe/tests/unittests/database/test_types.py +38 -0
  77. calibpipe/tests/unittests/telescope/camera/test_calculate_camcalib_coefficients.py +456 -0
  78. calibpipe/tests/unittests/telescope/camera/test_produce_camcalib_test_data.py +37 -0
  79. calibpipe/tests/unittests/telescope/throughput/test_muon_throughput_calibrator.py +693 -0
  80. calibpipe/tests/unittests/test_bootstrap_db.py +79 -0
  81. calibpipe/tests/unittests/utils/test_observatory.py +309 -0
  82. calibpipe/tools/atmospheric_base_tool.py +78 -0
  83. calibpipe/tools/atmospheric_model_db_loader.py +181 -0
  84. calibpipe/tools/basic_tool_with_db.py +38 -0
  85. calibpipe/tools/camcalib_test_data.py +374 -0
  86. calibpipe/tools/camera_calibrator.py +462 -0
  87. calibpipe/tools/contemporary_mdp_producer.py +87 -0
  88. calibpipe/tools/init_db.py +37 -0
  89. calibpipe/tools/macobac_calculator.py +82 -0
  90. calibpipe/tools/molecular_atmospheric_model_producer.py +197 -0
  91. calibpipe/tools/muon_throughput_calculator.py +219 -0
  92. calibpipe/tools/observatory_data_db_loader.py +71 -0
  93. calibpipe/tools/reference_atmospheric_model_selector.py +201 -0
  94. calibpipe/tools/telescope_cross_calibration_calculator.py +721 -0
  95. calibpipe/utils/__init__.py +10 -0
  96. calibpipe/utils/observatory.py +486 -0
  97. calibpipe/utils/observatory_containers.py +26 -0
  98. calibpipe/version.py +24 -0
  99. ctao_calibpipe-0.3.0rc2.dist-info/METADATA +92 -0
  100. ctao_calibpipe-0.3.0rc2.dist-info/RECORD +105 -0
  101. ctao_calibpipe-0.3.0rc2.dist-info/WHEEL +5 -0
  102. ctao_calibpipe-0.3.0rc2.dist-info/entry_points.txt +12 -0
  103. ctao_calibpipe-0.3.0rc2.dist-info/licenses/AUTHORS.md +13 -0
  104. ctao_calibpipe-0.3.0rc2.dist-info/licenses/LICENSE +21 -0
  105. ctao_calibpipe-0.3.0rc2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,693 @@
+ from collections import namedtuple
+ from pathlib import Path
+
+ import astropy.units as u
+ import numpy as np
+ import pytest
+ import yaml
+ from calibpipe.tools.muon_throughput_calculator import CalculateThroughputWithMuons
+ from scipy.optimize import curve_fit, minimize_scalar
+ from scipy.stats import chi2
+ from traitlets.config.loader import Config
+
+
+ def estimate_sigma_sys(x, sigma_stat, confidence=0.95):
+     """
+     Estimate uncorrelated systematic scatter sigma_sys via maximum likelihood.
+
+     Parameters
+     ----------
+     x : array_like
+         Measurements (1D).
+     sigma_stat : array_like
+         Known per-measurement statistical uncertainties (same length as x).
+     confidence : float, optional
+         Confidence level for interval (default 0.95).
+
+     Returns
+     -------
+     mu_hat : float
+         Weighted mean estimate.
+     sigma_sys_hat : float
+         MLE of systematic scatter.
+     ci : tuple
+         Confidence interval (low, high) for sigma_sys.
+     """
+
+     x = np.asarray(x)
+     sigma_stat = np.asarray(sigma_stat)
+
+     def nll(s):
+         if s < 0:
+             return np.inf
+         v = sigma_stat**2 + s**2
+         w = 1.0 / v
+         mu_hat = np.sum(w * x) / np.sum(w)
+         return 0.5 * np.sum(np.log(2 * np.pi * v) + (x - mu_hat) ** 2 / v)
+
+     # Minimize NLL over sigma_sys
+     res = minimize_scalar(nll, bounds=(0, np.std(x) * 5), method="bounded")
+     sigma_sys_hat = res.x
+     nll_min = res.fun
+
+     # Compute mu_hat at best sigma_sys
+     v = sigma_stat**2 + sigma_sys_hat**2
+     w = 1.0 / v
+     mu_hat = np.sum(w * x) / np.sum(w)
+
+     # Likelihood-ratio CI
+     cutoff = 0.5 * chi2.ppf(confidence, df=1)
+
+     def objective(s):
+         return abs(nll(s) - (nll_min + cutoff))
+
+     # Lower bound search
+     lo = minimize_scalar(objective, bounds=(0, sigma_sys_hat), method="bounded").x
+     # Upper bound search
+     hi = minimize_scalar(
+         objective, bounds=(sigma_sys_hat, np.std(x) * 10), method="bounded"
+     ).x
+
+     return mu_hat, sigma_sys_hat, (lo, hi)
+
+
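As a minimal usage sketch (toy numbers, not from the package): when the scatter of the measurements exceeds their statistical errors, the estimator should return a non-zero sigma_sys.

    import numpy as np

    # Five throughput-like measurements whose spread (~0.006) exceeds the
    # quoted statistical errors (0.003), so a systematic term is needed.
    x = np.array([0.300, 0.310, 0.295, 0.305, 0.312])
    sigma_stat = np.full(5, 0.003)

    mu_hat, sigma_sys_hat, (lo, hi) = estimate_sigma_sys(x, sigma_stat)
    print(f"weighted mean = {mu_hat:.4f}")
    print(f"sigma_sys = {sigma_sys_hat:.4f}, 95% CI = ({lo:.4f}, {hi:.4f})")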
+ parameter_names = [
+     "muon_sign_first",
+     "mirror_reflectivity_first",
+     "muon_sign_second",
+     "mirror_reflectivity_second",
+     "parameter_to_compare",
+     "absolute_consistency_range",
+     "relative_consistency_range",
+     "are_expected_to_differ",
+ ]
+ Parameters = namedtuple("MuonTestParams", parameter_names)
+
+ parameter_mst_names = [
+     "file_fixture_name",
+     "expected_throughput",
+     "expected_throughput_rel_uncertainty",
+ ]
+ Parameters_mst = namedtuple("MuonTestParamsMST", parameter_mst_names)
+
+
+ class TestCalculateThroughputWithMuons:
+     """Test class for muon throughput analysis"""
+
+     config_path = Path(__file__).parent.joinpath(
+         "../../../../../../docs/source/user_guide/telescope/throughput/configuration/"
+     )
+
+     @pytest.fixture(scope="class")
+     def test_config(self, lst_muon_table_file):
+         with open(
+             self.config_path.joinpath("throughput_muon_configuration.yaml")
+         ) as yaml_file:
+             data = yaml.safe_load(yaml_file)
+
+         return data
+
+     @pytest.fixture(scope="class")
+     def empty_muon_file(self, empty_muon_table_file):
+         # Note: The fixture is named differently but maps to the same file
+         # This is for historical compatibility with the test
+         return empty_muon_table_file
+
+     @pytest.fixture(scope="class")
+     def good_muon_file_lst(self, lst_muon_table_file):
+         return lst_muon_table_file
+
+     @pytest.mark.muon()
+     def test_empty_data(self, test_config, empty_muon_file):
+         test_config["CalculateThroughputWithMuons"]["input_url"] = str(empty_muon_file)
+         test_calculate_throughput_muon_tool = CalculateThroughputWithMuons(
+             config=Config(test_config)
+         )
+
+         test_calculate_throughput_muon_tool.setup()
+
+         # The tool should handle empty data gracefully by skipping telescopes with no data
+         test_calculate_throughput_muon_tool.start()
+
+         # Check that containers were initialized but empty for skipped telescopes
+         assert len(test_calculate_throughput_muon_tool.throughput_containers) > 0
+         for (
+             tel_id,
+             containers,
+         ) in test_calculate_throughput_muon_tool.throughput_containers.items():
+             assert containers == {}  # Should be empty dict for skipped telescopes
+
+     def linear_model(self, x, a, b):
+         """Linear model for throughput vs reflectivity."""
+         return a * x + b
+
+     @pytest.mark.muon()
+     def test_muon_simulation_data_processing(self, test_config, muon_test_files):
+         """
+         Test that the throughput calculator can successfully process muon simulation data files.
+
+         This functional test verifies that the tool runs without errors and produces
+         valid throughput containers for all simulation files.
+         """
+         processed_files = 0
+
+         # Test processing of each muon simulation file
+         for particle_type, reflectivity_files in muon_test_files.items():
+             for reflectivity, file_path in reflectivity_files.items():
+                 # Configure tool for this specific file
+                 test_config["CalculateThroughputWithMuons"]["input_url"] = str(
+                     file_path
+                 )
+                 tool = CalculateThroughputWithMuons(config=Config(test_config))
+
+                 # Test setup phase
+                 tool.setup()
+                 assert (
+                     tool.subarray is not None
+                 ), f"Subarray not loaded for {particle_type} R={reflectivity}"
+                 assert (
+                     tool.aggregator is not None
+                 ), f"Aggregator not initialized for {particle_type} R={reflectivity}"
+
+                 # Test processing phase
+                 tool.start()
+
+                 # Verify containers were created
+                 assert hasattr(
+                     tool, "throughput_containers"
+                 ), "Throughput containers not initialized"
+
+                 # Should have results for telescope 1 (if processing succeeded)
+                 containers = tool.throughput_containers.get(1, [])
+                 if containers:  # Only check if processing succeeded
+                     # Verify container structure
+                     for container in containers:
+                         assert hasattr(
+                             container, "mean"
+                         ), f"Container missing mean for {particle_type} R={reflectivity}"
+                         assert hasattr(
+                             container, "std"
+                         ), f"Container missing std for {particle_type} R={reflectivity}"
+                         assert hasattr(
+                             container, "n_events"
+                         ), f"Container missing n_events for {particle_type} R={reflectivity}"
+                         assert (
+                             container.n_events > 0
+                         ), f"No events processed for {particle_type} R={reflectivity}"
+                         assert (
+                             0 < container.mean < 1
+                         ), f"Invalid throughput mean {container.mean} for {particle_type} R={reflectivity}"
+                         assert (
+                             container.std >= 0
+                         ), f"Invalid throughput std {container.std} for {particle_type} R={reflectivity}"
+
+                 processed_files += 1
+
+         # Verify we attempted to process all expected files
+         assert (
+             processed_files == 6
+         ), f"Expected to process 6 files, processed {processed_files}"
+
+     @pytest.fixture(scope="class")
+     def simulation_results(self, test_config, muon_test_files):
+         """
+         Fixture that processes simulation data once and provides results for performance tests.
+
+         This avoids re-running the analysis multiple times for different performance tests.
+
+         Each chunk now includes:
+         - throughput: Individual throughput measurement
+         - uncertainty: Statistical uncertainty
+         - systematic_uncertainty: Systematic uncertainty estimated using MLE
+         - total_uncertainty: Combined statistical + systematic uncertainty
+         - total_relative_uncertainty: Total relative uncertainty as percentage
+         - weighted_mean: Weighted mean for the reflectivity group
+         """
+         results = {}
+
+         # Process each muon file and extract throughput measurements
+         for particle_type, reflectivity_files in muon_test_files.items():
+             results[particle_type] = {}
+
+             for reflectivity, file_path in reflectivity_files.items():
+                 # Configure and run tool
+                 test_config["CalculateThroughputWithMuons"]["input_url"] = str(
+                     file_path
+                 )
+                 tool = CalculateThroughputWithMuons(config=Config(test_config))
+                 tool.setup()
+                 tool.start()
+
+                 # Extract results if processing succeeded
+                 containers = tool.throughput_containers.get(1, [])
+                 if containers:
+                     # Store individual chunk results (not combined)
+                     chunk_results = []
+                     for container in containers:
+                         # Ensure scalar extraction from container attributes
+                         mean_val = container.mean
+                         std_val = container.std
+                         n_events_val = container.n_events
+
+                         # Convert to scalar using numpy.squeeze
+                         mean_val = float(np.squeeze(mean_val))
+                         std_val = float(np.squeeze(std_val))
+                         n_events_val = int(np.squeeze(n_events_val))
+
+                         uncertainty_val = std_val / np.sqrt(n_events_val)
+                         rel_unc_val = (uncertainty_val / mean_val) * 100
+
+                         chunk_data = {
+                             "throughput": mean_val,
+                             "uncertainty": uncertainty_val,
+                             "n_events": n_events_val,
+                             "relative_uncertainty": rel_unc_val,
+                         }
+                         chunk_results.append(chunk_data)
+
+                     results[particle_type][reflectivity] = chunk_results
+
+         # Calculate systematic uncertainties and add to chunk results
+         for particle_type, reflectivity_data in results.items():
+             for reflectivity, chunk_list in reflectivity_data.items():
+                 if (
+                     len(chunk_list) >= 2
+                 ):  # Need at least 2 measurements for systematic analysis
+                     # Extract arrays for systematic uncertainty estimation
+                     measurements = np.array(
+                         [chunk["throughput"] for chunk in chunk_list]
+                     )
+                     stat_errors = np.array(
+                         [chunk["uncertainty"] for chunk in chunk_list]
+                     )
+
+                     # Estimate systematic uncertainty using MLE method
+                     mu_hat, sigma_sys_hat, ci = estimate_sigma_sys(
+                         measurements, stat_errors
+                     )
+
+                     # Add systematic uncertainty info to each chunk
+                     for chunk in chunk_list:
+                         stat_uncertainty = chunk["uncertainty"]
+                         total_uncertainty = np.sqrt(
+                             stat_uncertainty**2 + sigma_sys_hat**2
+                         )
+                         chunk["systematic_uncertainty"] = sigma_sys_hat
+                         chunk["total_uncertainty"] = total_uncertainty
+                         chunk["weighted_mean"] = mu_hat
+                         chunk["total_relative_uncertainty"] = (
+                             total_uncertainty / mu_hat
+                         ) * 100
+                 else:
+                     # For single measurements, no systematic uncertainty can be estimated
+                     for chunk in chunk_list:
+                         chunk["systematic_uncertainty"] = 0.0
+                         chunk["total_uncertainty"] = chunk["uncertainty"]
+                         chunk["weighted_mean"] = chunk["throughput"]
+                         chunk["total_relative_uncertainty"] = chunk[
+                             "relative_uncertainty"
+                         ]
+
+         # Validate fixture results once, instead of in every test
+         assert results, "No simulation data was processed"
+         # Verify we have both particle types with data
+         for particle_type, reflectivity_data in results.items():
+             assert reflectivity_data, f"No reflectivity data for {particle_type}"
+             for reflectivity, chunk_list in reflectivity_data.items():
+                 assert chunk_list, f"No chunks for {particle_type} at R={reflectivity}"
+
+         return results
+
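The per-chunk combination above is a straightforward quadrature sum; a standalone sketch with illustrative numbers:

    import numpy as np

    stat_uncertainty = 0.004  # one chunk's statistical uncertainty
    sigma_sys_hat = 0.002     # MLE systematic scatter of its reflectivity group
    mu_hat = 0.305            # weighted mean of the group

    # Statistical and systematic terms add in quadrature
    total_uncertainty = np.sqrt(stat_uncertainty**2 + sigma_sys_hat**2)
    total_relative_uncertainty = (total_uncertainty / mu_hat) * 100  # percent
    print(f"{total_uncertainty:.4f} ({total_relative_uncertainty:.2f}%)")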
+     @pytest.mark.muon()
+     @pytest.mark.lst()
+     def test_relative_uncertainty_requirement(self, simulation_results):
+         """
+         Test that all simulation measurements meet the ≤ 5% relative uncertainty requirement.
+         Uses total uncertainty (statistical + systematic).
+         """
+         # Test relative uncertainty requirement (≤ 5%) for each chunk using total uncertainty
+         for particle_type, reflectivity_data in simulation_results.items():
+             for reflectivity, chunk_list in reflectivity_data.items():
+                 for i, chunk in enumerate(chunk_list):
+                     # Use total relative uncertainty (statistical + systematic)
+                     total_rel_unc = chunk["total_relative_uncertainty"]
+                     stat_rel_unc = chunk["relative_uncertainty"]
+                     syst_unc = chunk["systematic_uncertainty"]
+
+                     assert total_rel_unc <= 5.0, (
+                         f"{particle_type} at R={reflectivity} chunk {i+1}: Total relative uncertainty "
+                         f"{total_rel_unc:.2f}% > 5.0% requirement "
+                         f"(stat: {stat_rel_unc:.2f}%, syst: {syst_unc:.4f})"
+                     )
+
+     @pytest.mark.muon()
+     @pytest.mark.lst()
+     def test_throughput_monotonicity(self, simulation_results):
+         """
+         Test that throughput increases with reflectivity (monotonicity expectation).
+         """
+         # Test throughput increases with reflectivity for each chunk comparison
+         for particle_type, reflectivity_data in simulation_results.items():
+             # Sort by reflectivity value but keep original string keys
+             reflectivity_keys = sorted(reflectivity_data.keys(), key=float)
+
+             # Compare chunks between different reflectivity values
+             for i in range(len(reflectivity_keys) - 1):
+                 current_r_key = reflectivity_keys[i]
+                 next_r_key = reflectivity_keys[i + 1]
+                 current_r = float(current_r_key)
+                 next_r = float(next_r_key)
+
+                 current_chunks = reflectivity_data[current_r_key]
+                 next_chunks = reflectivity_data[next_r_key]
+
+                 # Calculate average throughput for each reflectivity
+                 current_avg = np.mean([chunk["throughput"] for chunk in current_chunks])
+                 next_avg = np.mean([chunk["throughput"] for chunk in next_chunks])
+
+                 # Check general increasing trend
+                 assert next_avg >= current_avg, (
+                     f"{particle_type}: Average throughput decreases from R={current_r:.2f} "
+                     f"to R={next_r:.2f} ({current_avg:.4f} -> {next_avg:.4f})"
+                 )
+
+     @pytest.mark.muon()
+     @pytest.mark.lst()
+     def test_statistical_significance(self, simulation_results):
+         """
+         Test that throughput changes between reflectivity values are statistically significant.
+         Uses total uncertainty (statistical + systematic) for significance calculations.
+         """
+         # Test statistical significance of throughput changes using chunk data
+         for particle_type, reflectivity_data in simulation_results.items():
+             reflectivity_keys = sorted(reflectivity_data.keys(), key=float)
+
+             for i in range(len(reflectivity_keys) - 1):
+                 r1_key, r2_key = reflectivity_keys[i], reflectivity_keys[i + 1]
+                 chunks1 = reflectivity_data[r1_key]
+                 chunks2 = reflectivity_data[r2_key]
+
+                 # Extract values directly from chunks
+                 throughputs1 = [chunk["throughput"] for chunk in chunks1]
+                 uncertainties1 = [chunk["total_uncertainty"] for chunk in chunks1]
+                 throughputs2 = [chunk["throughput"] for chunk in chunks2]
+                 uncertainties2 = [chunk["total_uncertainty"] for chunk in chunks2]
+
+                 mean1 = np.mean(throughputs1)
+                 mean2 = np.mean(throughputs2)
+
+                 # Combined uncertainty (standard error of the means)
+                 sem1 = np.sqrt(np.sum(np.array(uncertainties1) ** 2)) / len(chunks1)
+                 sem2 = np.sqrt(np.sum(np.array(uncertainties2) ** 2)) / len(chunks2)
+                 combined_uncertainty = np.sqrt(sem1**2 + sem2**2)
+
+                 if combined_uncertainty > 0:
+                     throughput_diff = abs(mean2 - mean1)
+                     significance = throughput_diff / combined_uncertainty
+
+                     # Require at least 3σ significance
+                     assert significance >= 3.0, (
+                         f"{particle_type}: Throughput change from R={r1_key} to R={r2_key} "
+                         f"has significance {significance:.1f}σ < 3.0σ requirement "
+                         f"(diff: {throughput_diff:.4f}, unc: {combined_uncertainty:.4f})"
+                     )
+
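The significance above is plain error propagation on the two group means; a standalone sketch with illustrative numbers:

    import numpy as np

    # Chunk throughputs and total uncertainties at two reflectivities
    t1 = np.array([0.300, 0.302, 0.298])
    u1 = np.array([0.003, 0.003, 0.003])
    t2 = np.array([0.315, 0.318, 0.316])
    u2 = np.array([0.003, 0.003, 0.003])

    # Standard error of each mean, then the two combined in quadrature
    sem1 = np.sqrt(np.sum(u1**2)) / len(t1)
    sem2 = np.sqrt(np.sum(u2**2)) / len(t2)
    combined = np.sqrt(sem1**2 + sem2**2)

    significance = abs(t2.mean() - t1.mean()) / combined
    print(f"{significance:.1f} sigma")  # ≈ 6.7 sigma for these numbers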
+     @pytest.mark.muon()
+     @pytest.mark.lst()
+     def test_linearity_validation(self, simulation_results):
+         """
+         Test that throughput vs reflectivity follows a linear relationship.
+         Uses total uncertainty (statistical + systematic) for weighted fitting.
+         """
+         # Test linearity validation using all individual chunks
+         for particle_type, reflectivity_data in simulation_results.items():
+             # Collect all chunk data points for fitting
+             all_reflectivities = []
+             all_throughputs = []
+             all_uncertainties = []
+
+             for reflectivity_str, chunks in reflectivity_data.items():
+                 reflectivity_val = float(reflectivity_str)
+
+                 # Extract values directly from chunks
+                 for chunk in chunks:
+                     all_reflectivities.append(reflectivity_val)
+                     all_throughputs.append(chunk["throughput"])
+                     all_uncertainties.append(chunk["total_uncertainty"])
+
+             reflectivities = np.array(all_reflectivities)
+             throughputs = np.array(all_throughputs)
+             uncertainties = np.array(all_uncertainties)
+
+             # Perform weighted linear fit using all chunk data
+             popt, pcov = curve_fit(
+                 self.linear_model,
+                 reflectivities,
+                 throughputs,
+                 sigma=uncertainties,
+                 absolute_sigma=True,
+             )
+
+             # Calculate R-squared
+             y_fit = self.linear_model(reflectivities, *popt)
+             ss_res = np.sum((throughputs - y_fit) ** 2)
+             ss_tot = np.sum((throughputs - np.mean(throughputs)) ** 2)
+             r_squared = 1 - (ss_res / ss_tot) if ss_tot > 0 else 0
+
+             # Require reasonable correlation (R² ≥ 0.8 for individual chunk data)
+             assert (
+                 r_squared >= 0.8
+             ), f"{particle_type}: Linear fit R² = {r_squared:.3f} < 0.8 requirement ({len(throughputs)} chunks)"
+
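Since the fit is weighted and uses absolute_sigma=True, a reduced chi-square is a natural complement to the R² cut; a standalone sketch on synthetic points, with the linear model repeated for self-containment:

    import numpy as np
    from scipy.optimize import curve_fit

    def linear_model(x, a, b):
        return a * x + b

    reflectivities = np.array([0.80, 0.80, 0.81, 0.81, 0.83, 0.83])
    throughputs = np.array([0.300, 0.302, 0.304, 0.305, 0.311, 0.312])
    uncertainties = np.full(6, 0.002)

    popt, pcov = curve_fit(
        linear_model, reflectivities, throughputs,
        sigma=uncertainties, absolute_sigma=True,
    )

    # Reduced chi-square: values near 1 mean the scatter around the line
    # is consistent with the quoted uncertainties
    residuals = (throughputs - linear_model(reflectivities, *popt)) / uncertainties
    chi2_red = np.sum(residuals**2) / (len(throughputs) - len(popt))
    print(f"chi2/ndf = {chi2_red:.2f}")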
+     @pytest.mark.muon()
+     @pytest.mark.lst()
+     def test_particle_type_consistency(self, simulation_results):
+         """
+         Test consistency between μ- and μ+ measurements at the same reflectivity.
+         Uses a 95% confidence level test to ensure measurements are statistically consistent.
+         """
+         from scipy.stats import t
+
+         # Test consistency between particle types using 95% confidence level
+         muon_minus_data = simulation_results["μ-"]
+         muon_plus_data = simulation_results["μ+"]
+
+         for reflectivity in muon_minus_data.keys():
+             minus_chunks = muon_minus_data[reflectivity]
+             plus_chunks = muon_plus_data[reflectivity]
+
+             # Ensure we have sufficient data for statistical test
+             assert len(minus_chunks) >= 1, f"No μ- chunks at R={reflectivity}"
+             assert len(plus_chunks) >= 1, f"No μ+ chunks at R={reflectivity}"
+
+             # Extract measurements and total uncertainties directly from chunks
+             minus_measurements = np.array(
+                 [chunk["throughput"] for chunk in minus_chunks]
+             )
+             plus_measurements = np.array([chunk["throughput"] for chunk in plus_chunks])
+             minus_uncertainties = np.array(
+                 [chunk["total_uncertainty"] for chunk in minus_chunks]
+             )
+             plus_uncertainties = np.array(
+                 [chunk["total_uncertainty"] for chunk in plus_chunks]
+             )
+
+             # Calculate weighted means and their uncertainties
+             minus_weights = 1.0 / (minus_uncertainties**2)
+             plus_weights = 1.0 / (plus_uncertainties**2)
+
+             minus_weighted_mean = np.sum(minus_weights * minus_measurements) / np.sum(
+                 minus_weights
+             )
+             plus_weighted_mean = np.sum(plus_weights * plus_measurements) / np.sum(
+                 plus_weights
+             )
+
+             minus_weighted_uncertainty = 1.0 / np.sqrt(np.sum(minus_weights))
+             plus_weighted_uncertainty = 1.0 / np.sqrt(np.sum(plus_weights))
+
+             # Calculate difference and combined uncertainty
+             difference = plus_weighted_mean - minus_weighted_mean
+             combined_uncertainty = np.sqrt(
+                 minus_weighted_uncertainty**2 + plus_weighted_uncertainty**2
+             )
+
+             # Two-tailed test statistic; compared against the t-distribution
+             # rather than the normal (|z| < 1.96) because of the small samples
+             t_stat = abs(difference) / combined_uncertainty
+
+             # Effective degrees of freedom via the Satterthwaite
+             # approximation for unequal variances
+             dof_eff = (
+                 minus_weighted_uncertainty**2 + plus_weighted_uncertainty**2
+             ) ** 2 / (
+                 minus_weighted_uncertainty**4 / max(len(minus_chunks) - 1, 1)
+                 + plus_weighted_uncertainty**4 / max(len(plus_chunks) - 1, 1)
+             )
+
+             # 95% confidence level: p < 0.05, so t_critical for two-tailed test
+             t_critical = t.ppf(0.975, dof_eff)  # 0.975 = 1 - 0.05/2
+
+             # Get systematic uncertainties for reporting
+             minus_syst = (
+                 minus_chunks[0]["systematic_uncertainty"] if minus_chunks else 0.0
+             )
+             plus_syst = plus_chunks[0]["systematic_uncertainty"] if plus_chunks else 0.0
+
+             assert t_stat <= t_critical, (
+                 f"At R={reflectivity}: μ- and μ+ measurements are inconsistent at 95% CL "
+                 f"(|t| = {t_stat:.2f} > {t_critical:.2f}, dof_eff = {dof_eff:.1f})\n"
+                 f"μ-: {minus_weighted_mean:.6f} ± {minus_weighted_uncertainty:.6f} "
+                 f"(N={len(minus_chunks)}, σ_sys={minus_syst:.6f})\n"
+                 f"μ+: {plus_weighted_mean:.6f} ± {plus_weighted_uncertainty:.6f} "
+                 f"(N={len(plus_chunks)}, σ_sys={plus_syst:.6f})\n"
+                 f"Difference: {difference:.6f} ± {combined_uncertainty:.6f}"
+             )
+
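The Satterthwaite step can be exercised in isolation; a standalone sketch of the effective degrees of freedom and the two-tailed critical value (numbers illustrative):

    from scipy.stats import t

    # Weighted-mean uncertainties and chunk counts of the two samples
    u_minus, n_minus = 0.0030, 4
    u_plus, n_plus = 0.0025, 4

    # Satterthwaite approximation for unequal variances
    dof_eff = (u_minus**2 + u_plus**2) ** 2 / (
        u_minus**4 / max(n_minus - 1, 1) + u_plus**4 / max(n_plus - 1, 1)
    )

    # Two-tailed 95% critical value; |t| above this flags an inconsistency
    t_critical = t.ppf(0.975, dof_eff)
    print(f"dof_eff = {dof_eff:.1f}, t_critical = {t_critical:.2f}")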
+     @pytest.mark.parametrize(
+         parameter_names,
+         [
+             Parameters(
+                 muon_sign_first="μ-",
+                 mirror_reflectivity_first="0.80",
+                 muon_sign_second="μ+",
+                 mirror_reflectivity_second="0.80",
+                 parameter_to_compare="throughput",
+                 absolute_consistency_range=0.001,
+                 relative_consistency_range=0.0,
+                 are_expected_to_differ=False,
+             ),
+             Parameters(
+                 muon_sign_first="μ-",
+                 mirror_reflectivity_first="0.81",
+                 muon_sign_second="μ+",
+                 mirror_reflectivity_second="0.81",
+                 parameter_to_compare="throughput",
+                 absolute_consistency_range=0.001,
+                 relative_consistency_range=0.0,
+                 are_expected_to_differ=False,
+             ),
+             Parameters(
+                 muon_sign_first="μ-",
+                 mirror_reflectivity_first="0.83",
+                 muon_sign_second="μ+",
+                 mirror_reflectivity_second="0.83",
+                 parameter_to_compare="throughput",
+                 absolute_consistency_range=0.001,
+                 relative_consistency_range=0.0,
+                 are_expected_to_differ=False,
+             ),
+             Parameters(
+                 muon_sign_first="μ-",
+                 mirror_reflectivity_first="0.80",
+                 muon_sign_second="μ-",
+                 mirror_reflectivity_second="0.83",
+                 parameter_to_compare="throughput",
+                 absolute_consistency_range=0.001,
+                 relative_consistency_range=0.0,
+                 are_expected_to_differ=True,
+             ),
+         ],
+     )
+     @pytest.mark.muon()
+     @pytest.mark.lst()
+     def test_check_comparative_consistency(
+         self,
+         muon_sign_first,
+         mirror_reflectivity_first,
+         muon_sign_second,
+         mirror_reflectivity_second,
+         parameter_to_compare,
+         absolute_consistency_range,
+         relative_consistency_range,
+         are_expected_to_differ,
+         simulation_results,
+     ):
+         """
+         Comparative consistency test of two measurements or simulations.
+         The mean measured parameters are compared to ensure they fall within the specified range.
+         """
+
+         first = np.fromiter(
+             (
+                 row[parameter_to_compare]
+                 for row in simulation_results[muon_sign_first][
+                     mirror_reflectivity_first
+                 ]
+             ),
+             dtype=float,
+         )
+         second = np.fromiter(
+             (
+                 row[parameter_to_compare]
+                 for row in simulation_results[muon_sign_second][
+                     mirror_reflectivity_second
+                 ]
+             ),
+             dtype=float,
+         )
+
+         if len(first) != len(second):
+             first = np.nanmean(first)
+             second = np.nanmean(second)
+
+         if are_expected_to_differ:
+             assert not u.isclose(
+                 first,
+                 second,
+                 atol=absolute_consistency_range,
+                 rtol=relative_consistency_range,
+             ).any()
+         else:
+             assert u.isclose(
+                 first,
+                 second,
+                 atol=absolute_consistency_range,
+                 rtol=relative_consistency_range,
+             ).any()
+
+     @pytest.mark.parametrize(
+         parameter_mst_names,
+         [
+             Parameters_mst(
+                 file_fixture_name="muon_mst_nc_file",
+                 expected_throughput=0.18,
+                 expected_throughput_rel_uncertainty=0.05,
+             ),
+             Parameters_mst(
+                 file_fixture_name="muon_mst_fc_file",
+                 expected_throughput=0.2,
+                 expected_throughput_rel_uncertainty=0.05,
+             ),
+         ],
+     )
+     @pytest.mark.muon()
+     @pytest.mark.mst()
+     def test_check_mst(
+         self,
+         request,
+         file_fixture_name,
+         expected_throughput,
+         expected_throughput_rel_uncertainty,
+         test_config,
+         muon_mst_nc_file,
+         muon_mst_fc_file,
+     ):
+         """
+         Comparative consistency test of measurements or simulations for MST (NC/FC).
+         """
+
+         test_config["CalculateThroughputWithMuons"]["input_url"] = str(
+             request.getfixturevalue(file_fixture_name)
+         )
+         tool = CalculateThroughputWithMuons(config=Config(test_config))
+         tool.setup()
+         tool.start()
+
+         # Extract results if processing succeeded
+         containers = tool.throughput_containers.get(1, [])
+         if containers:
+             mean_val = np.array([container.mean for container in containers])
+
+             assert u.isclose(
+                 mean_val,
+                 expected_throughput * np.ones(len(mean_val)),
+                 rtol=expected_throughput_rel_uncertainty,
+             ).any()