gammasimtools 0.16.0__py3-none-any.whl → 0.17.0__py3-none-any.whl
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- {gammasimtools-0.16.0.dist-info → gammasimtools-0.17.0.dist-info}/METADATA +4 -2
- {gammasimtools-0.16.0.dist-info → gammasimtools-0.17.0.dist-info}/RECORD +60 -54
- {gammasimtools-0.16.0.dist-info → gammasimtools-0.17.0.dist-info}/WHEEL +1 -1
- {gammasimtools-0.16.0.dist-info → gammasimtools-0.17.0.dist-info}/entry_points.txt +3 -1
- simtools/_version.py +2 -2
- simtools/applications/derive_ctao_array_layouts.py +5 -5
- simtools/applications/generate_simtel_event_data.py +36 -46
- simtools/applications/merge_tables.py +104 -0
- simtools/applications/plot_array_layout.py +145 -258
- simtools/applications/production_derive_corsika_limits.py +35 -220
- simtools/applications/production_derive_statistics.py +77 -43
- simtools/applications/simulate_light_emission.py +1 -0
- simtools/applications/simulate_prod.py +30 -18
- simtools/applications/simulate_prod_htcondor_generator.py +0 -1
- simtools/applications/submit_array_layouts.py +93 -0
- simtools/applications/verify_simulation_model_production_tables.py +52 -0
- simtools/camera/camera_efficiency.py +3 -3
- simtools/configuration/commandline_parser.py +28 -34
- simtools/configuration/configurator.py +0 -4
- simtools/corsika/corsika_config.py +17 -12
- simtools/corsika/primary_particle.py +46 -13
- simtools/data_model/metadata_collector.py +7 -3
- simtools/db/db_handler.py +11 -11
- simtools/db/db_model_upload.py +2 -2
- simtools/io_operations/io_handler.py +2 -2
- simtools/io_operations/io_table_handler.py +345 -0
- simtools/job_execution/htcondor_script_generator.py +2 -2
- simtools/job_execution/job_manager.py +7 -121
- simtools/layout/array_layout_utils.py +385 -0
- simtools/model/array_model.py +5 -0
- simtools/model/model_repository.py +134 -0
- simtools/production_configuration/{calculate_statistical_errors_grid_point.py → calculate_statistical_uncertainties_grid_point.py} +101 -112
- simtools/production_configuration/derive_corsika_limits.py +239 -111
- simtools/production_configuration/derive_corsika_limits_grid.py +189 -0
- simtools/production_configuration/derive_production_statistics.py +57 -26
- simtools/production_configuration/derive_production_statistics_handler.py +70 -37
- simtools/production_configuration/interpolation_handler.py +296 -94
- simtools/ray_tracing/ray_tracing.py +7 -6
- simtools/reporting/docs_read_parameters.py +104 -62
- simtools/runners/corsika_simtel_runner.py +4 -1
- simtools/runners/runner_services.py +5 -4
- simtools/schemas/model_parameters/dsum_threshold.schema.yml +41 -0
- simtools/schemas/production_configuration_metrics.schema.yml +2 -2
- simtools/simtel/simtel_config_writer.py +34 -14
- simtools/simtel/simtel_io_event_reader.py +301 -194
- simtools/simtel/simtel_io_event_writer.py +207 -227
- simtools/simtel/simtel_io_file_info.py +9 -4
- simtools/simtel/simtel_io_metadata.py +20 -5
- simtools/simtel/simulator_array.py +2 -2
- simtools/simtel/simulator_light_emission.py +79 -34
- simtools/simtel/simulator_ray_tracing.py +2 -2
- simtools/simulator.py +101 -68
- simtools/testing/validate_output.py +4 -1
- simtools/utils/general.py +1 -1
- simtools/utils/names.py +5 -5
- simtools/visualization/plot_array_layout.py +242 -0
- simtools/visualization/plot_pixels.py +681 -0
- simtools/visualization/visualize.py +3 -219
- simtools/applications/production_generate_simulation_config.py +0 -152
- simtools/layout/ctao_array_layouts.py +0 -172
- simtools/production_configuration/generate_simulation_config.py +0 -158
- {gammasimtools-0.16.0.dist-info → gammasimtools-0.17.0.dist-info}/licenses/LICENSE +0 -0
- {gammasimtools-0.16.0.dist-info → gammasimtools-0.17.0.dist-info}/top_level.txt +0 -0
simtools/production_configuration/{calculate_statistical_errors_grid_point.py → calculate_statistical_uncertainties_grid_point.py}
@@ -6,21 +6,21 @@ import numpy as np
 from astropy import units as u
 from astropy.io import fits
 
-__all__ = ["StatisticalErrorEvaluator"]
+__all__ = ["StatisticalUncertaintyEvaluator"]
 
 
-class StatisticalErrorEvaluator:
+class StatisticalUncertaintyEvaluator:
     """
-
+    Evaluate statistical uncertainties for a metric at a point in the observational parameter grid.
 
     Parameters
     ----------
     file_path : str
         Path to the DL2 MC event file.
-    metrics : dict
-        Dictionary of metrics to evaluate.
+    metrics : dict
+        Dictionary of metrics to evaluate.
     grid_point : tuple, optional
-
+        Grid point (energy, azimuth, zenith, NSB, offset).
     """
 
     def __init__(
@@ -35,13 +35,9 @@ class StatisticalErrorEvaluator:
         self.grid_point = grid_point
 
         self.data = self.load_data_from_file(file_path)
+        self.energy_bin_edges = self.create_energy_bin_edges()
 
-        self.
-        self.energy_estimate = None
-        self.sigma_energy = None
-        self.delta_energy = None
-
-        self.metric_results = None
+        self.metric_results = {}
         self.energy_threshold = None
 
     def _load_event_data(self, hdul, data_type):
@@ -76,21 +72,14 @@ class StatisticalErrorEvaluator:
         unique_azimuths = np.unique(events_data["PNT_AZ"]) * u.deg
         unique_zeniths = 90 * u.deg - np.unique(events_data["PNT_ALT"]) * u.deg
         if len(unique_azimuths) > 1 or len(unique_zeniths) > 1:
-            msg = (
-                f"Multiple values found for azimuth ({unique_azimuths}) zenith ({unique_zeniths})."
-            )
+            msg = f"Multiple values found for azimuth ({unique_azimuths}) zenith ({unique_zeniths})"
             self._logger.error(msg)
             raise ValueError(msg)
-        if self.grid_point is not None:
-            self._logger.warning(
-                f"Grid point already set to: {self.grid_point}. "
-                "Overwriting with new values from file."
-            )
         self.grid_point = (
             1 * u.TeV,
             unique_azimuths[0],
             unique_zeniths[0],
-            0,
+            0,  # NSB needs to be read and set here
             0 * u.deg,
         )
         self._logger.info(f"Grid point values: {self.grid_point}")
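
The grid-point block above derives the zenith angle from the pointing altitude with plain astropy units arithmetic. A minimal standalone sketch of that conversion, using invented pointing columns rather than a real DL2 event table:

import numpy as np
from astropy import units as u

# Invented pointing columns as they might appear in an event list (degrees).
pnt_az = np.array([180.0, 180.0, 180.0])
pnt_alt = np.array([70.0, 70.0, 70.0])

unique_azimuths = np.unique(pnt_az) * u.deg
# Zenith angle is the complement of altitude.
unique_zeniths = 90 * u.deg - np.unique(pnt_alt) * u.deg
print(unique_azimuths, unique_zeniths)  # [180.] deg [20.] deg
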
@@ -127,7 +116,7 @@ class StatisticalErrorEvaluator:
             raise FileNotFoundError(error_message) from e
         return data
 
-    def create_bin_edges(self):
+    def create_energy_bin_edges(self):
         """
         Create unique energy bin edges.
 
@@ -164,7 +153,9 @@ class StatisticalErrorEvaluator:
         )
         return reconstructed_event_histogram * u.count
 
-    def
+    def compute_efficiency_and_uncertainties(
+        self, reconstructed_event_counts, simulated_event_counts
+    ):
         """
         Compute reconstructed event efficiency and its uncertainty assuming binomial distribution.
 
@@ -179,7 +170,7 @@ class StatisticalErrorEvaluator:
         -------
         efficiencies : array
             Array of calculated efficiencies.
-
+        relative_uncertainties : array
             Array of relative uncertainties.
         """
         # Ensure the inputs have compatible units
@@ -220,15 +211,15 @@ class StatisticalErrorEvaluator:
             )
         )
 
-        # Compute relative
-
-            uncertainties,
+        # Compute relative uncertainties
+        relative_uncertainties = np.divide(
+            uncertainties.value,
             np.sqrt(simulated_event_counts.value),
-            out=np.zeros_like(uncertainties, dtype=float),
-            where=uncertainties > 0,
+            out=np.zeros_like(uncertainties.value, dtype=float),
+            where=uncertainties.value > 0,
         )
 
-        return efficiencies,
+        return efficiencies, relative_uncertainties
 
     def calculate_energy_threshold(self, requested_eff_area_fraction=0.1):
         """
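
The relative-uncertainty change above leans on NumPy's guarded element-wise division so that bins with zero uncertainty keep a zero result instead of triggering a divide-by-zero. A short sketch of the same np.divide pattern with invented numbers (the uncertainties array in the package is computed earlier in the method and is only assumed here):

import numpy as np

# Invented per-bin absolute uncertainties and simulated event counts.
uncertainties = np.array([0.0, 2.0, 5.0])
simulated_event_counts = np.array([100.0, 400.0, 2500.0])

# Divide only where the uncertainty is positive; other bins keep the 0.0 from `out`.
relative_uncertainties = np.divide(
    uncertainties,
    np.sqrt(simulated_event_counts),
    out=np.zeros_like(uncertainties, dtype=float),
    where=uncertainties > 0,
)
print(relative_uncertainties)  # [0.  0.1 0.1]
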
@@ -239,13 +230,12 @@ class StatisticalErrorEvaluator:
         float
             Energy threshold value.
         """
-        bin_edges = self.create_bin_edges()
         reconstructed_event_histogram = self.compute_reconstructed_event_histogram(
-            self.data["event_energies_mc"],
+            self.data["event_energies_mc"], self.energy_bin_edges
         )
         simulated_event_histogram = self.data["simulated_event_histogram"]
 
-        efficiencies, _ = self.
+        efficiencies, _ = self.compute_efficiency_and_uncertainties(
            reconstructed_event_histogram, simulated_event_histogram
         )
 
@@ -257,7 +247,7 @@ class StatisticalErrorEvaluator:
         if threshold_index == 0 and efficiencies[0] < threshold_efficiency:
             return
 
-        self.energy_threshold =
+        self.energy_threshold = self.energy_bin_edges[threshold_index]
 
     def calculate_uncertainty_effective_area(self):
         """
@@ -265,18 +255,42 @@ class StatisticalErrorEvaluator:
 
         Returns
         -------
-
+        dict
             Dictionary with uncertainties for the file.
         """
-        bin_edges = self.create_bin_edges()
         reconstructed_event_histogram = self.compute_reconstructed_event_histogram(
-            self.data["event_energies_mc"],
+            self.data["event_energies_mc"], self.energy_bin_edges
        )
         simulated_event_histogram = self.data["simulated_event_histogram"]
-        _,
+        _, relative_uncertainties = self.compute_efficiency_and_uncertainties(
             reconstructed_event_histogram, simulated_event_histogram
         )
-        return {"
+        return {"relative_uncertainties": relative_uncertainties}
+
+    def calculate_max_error_for_effective_area(self):
+        """
+        Calculate the maximum relative uncertainty for effective area within the validity range.
+
+        Returns
+        -------
+        max_error : float
+            Maximum relative error.
+        """
+        energy_range = self.metrics.get("uncertainty_effective_area", {}).get("energy_range")
+
+        min_energy, max_energy = (
+            energy_range["value"][0] * u.Unit(energy_range["unit"]),
+            energy_range["value"][1] * u.Unit(energy_range["unit"]),
+        )
+        valid_uncertainties = [
+            error
+            for energy, error in zip(
+                self.data["bin_edges_low"],
+                self.metric_results["uncertainty_effective_area"]["relative_uncertainties"],
+            )
+            if min_energy <= energy <= max_energy
+        ]
+        return max(valid_uncertainties)
 
     def calculate_energy_estimate(self):
         """
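
The new calculate_max_error_for_effective_area selects the largest relative uncertainty inside the configured energy validity range. That filter can be exercised in isolation; a sketch with an invented energy_range entry (same shape as the metrics configuration referenced above) and invented bin edges:

import numpy as np
from astropy import units as u

# Invented metric configuration fragment.
energy_range = {"value": [0.1, 10.0], "unit": "TeV"}
min_energy = energy_range["value"][0] * u.Unit(energy_range["unit"])
max_energy = energy_range["value"][1] * u.Unit(energy_range["unit"])

# Invented lower bin edges and per-bin relative uncertainties.
bin_edges_low = np.array([0.05, 0.2, 1.0, 5.0, 20.0]) * u.TeV
relative_uncertainties = [0.5, 0.08, 0.03, 0.12, 0.9]

valid = [
    unc
    for energy, unc in zip(bin_edges_low, relative_uncertainties)
    if min_energy <= energy <= max_energy
]
print(max(valid))  # 0.12
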
@@ -287,7 +301,7 @@ class StatisticalErrorEvaluator:
         float
             The calculated uncertainty for energy estimation.
         """
-        logging.info("Calculating Energy Resolution
+        logging.info("Calculating Energy Resolution Uncertainty")
 
         event_energies_reco = self.data["event_energies_reco"]
         event_energies_mc = self.data["event_energies_mc"]
@@ -300,87 +314,59 @@ class StatisticalErrorEvaluator:
 
         energy_deviation = (event_energies_reco - event_energies_mc) / event_energies_mc
 
-
-        bin_indices = np.digitize(event_energies_reco, bin_edges) - 1
+        bin_indices = np.digitize(event_energies_reco, self.energy_bin_edges) - 1
 
         energy_deviation_by_bin = [
-            energy_deviation[bin_indices == i] for i in range(len(
+            energy_deviation[bin_indices == i] for i in range(len(self.energy_bin_edges) - 1)
         ]
 
         # Calculate sigma for each bin
-        sigma_energy = [np.std(d) if len(d) > 0 else np.nan for d in energy_deviation_by_bin]
+        sigma_energy = [np.std(d.value) if len(d) > 0 else np.nan for d in energy_deviation_by_bin]
 
         # Calculate delta_energy as the mean deviation for each bin
-        delta_energy = [np.mean(d) if len(d) > 0 else np.nan for d in energy_deviation_by_bin]
+        delta_energy = [np.mean(d.value) if len(d) > 0 else np.nan for d in energy_deviation_by_bin]
 
         # Combine sigma into a single measure
         overall_uncertainty = np.nanmean(sigma_energy)
 
-
+        self.metric_results["energy_estimate"] = {
+            "overall_uncertainty": overall_uncertainty,
+            "sigma_energy": sigma_energy,
+            "delta_energy": delta_energy,
+        }
 
     def calculate_metrics(self):
         """Calculate all defined metrics as specified in self.metrics and store results."""
         if "uncertainty_effective_area" in self.metrics:
-            self.uncertainty_effective_area =
-
-
-                "energy_range"
-            )
-            min_energy, max_energy = (
-                energy_range["value"][0] * u.Unit(energy_range["unit"]),
-                energy_range["value"][1] * u.Unit(energy_range["unit"]),
-            )
-
-            valid_errors = [
-                error
-                for energy, error in zip(
-                    self.data["bin_edges_low"],
-                    self.uncertainty_effective_area["relative_errors"],
-                )
-                if min_energy <= energy <= max_energy
+            self.metric_results["uncertainty_effective_area"] = {
+                "relative_uncertainties": self.calculate_uncertainty_effective_area()[
+                    "relative_uncertainties"
                 ]
-
-                max(valid_errors) if valid_errors else 0.0
-            )
-            ref_value = self.metrics.get("uncertainty_effective_area", {}).get("target_error")[
-                "value"
-            ]
-            self._logger.info(
-                f"Effective Area Error (max in validity range): "
-                f"{self.uncertainty_effective_area['max_error'].value:.6f}, "
-                f"Reference: {ref_value:.3f}"
-            )
+            }
 
-
-
-            self.calculate_energy_estimate()
+            self.metric_results["uncertainty_effective_area"]["max_error"] = (
+                self.calculate_max_error_for_effective_area()
             )
-            ref_value = self.metrics.get("
+            ref_value = self.metrics.get("uncertainty_effective_area", {}).get(
+                "target_uncertainty"
+            )["value"]
             self._logger.info(
-                f"
+                f"Effective Area Uncertainty (max in validity range): "
+                f"{self.metric_results['uncertainty_effective_area']['max_error']:.6f}, "
+                f"Reference: {ref_value:.3f}"
             )
-        else:
-            raise ValueError("Invalid metric specified.")
-        self.metric_results = {
-            "uncertainty_effective_area": self.uncertainty_effective_area,
-            "energy_estimate": self.energy_estimate,
-        }
-        return self.metric_results
 
-
-
-
+        if "energy_estimate" in self.metrics:
+            self.calculate_energy_estimate()
+            ref_value = self.metrics.get("energy_estimate", {}).get("target_uncertainty")["value"]
+            self._logger.info(
+                f"Energy Estimate Uncertainty: "
+                f"{self.metric_results['energy_estimate']['overall_uncertainty']:.6f}, "
+                f"Reference: {ref_value:.3f}"
+            )
 
-
-
-            max_error : float
-                Maximum relative error.
-        """
-        if "relative_errors" in self.metric_results["uncertainty_effective_area"]:
-            return np.max(self.metric_results["uncertainty_effective_area"]["relative_errors"])
-        if self.uncertainty_effective_area:
-            return np.max(self.uncertainty_effective_area["relative_errors"])
-        return None
+        if not ("uncertainty_effective_area" in self.metrics or "energy_estimate" in self.metrics):
+            raise ValueError("Invalid metric specified.")
 
     def calculate_overall_metric(self, metric="average"):
         """
@@ -393,31 +379,34 @@ class StatisticalErrorEvaluator:
 
         Returns
         -------
-
-
+        float
+            The overall metric value.
         """
         # Decide how to combine the metrics
         if self.metric_results is None:
             raise ValueError("Metrics have not been computed yet.")
 
-
+        overall_max_uncertainties = {}
 
         for metric_name, result in self.metric_results.items():
             if metric_name == "uncertainty_effective_area":
-
-
-
-
-
-
+                max_uncertainties = self.calculate_max_error_for_effective_area()
+                overall_max_uncertainties[metric_name] = (
+                    max_uncertainties if max_uncertainties else 0
+                )
+            elif metric_name == "energy_estimate":
+                # Use the "overall_uncertainty"
+                overall_max_uncertainties[metric_name] = result["overall_uncertainty"]
             else:
                 raise ValueError(f"Unsupported result type for {metric_name}: {type(result)}")
-
-
+
+        self._logger.info(f"overall_max_uncertainties {overall_max_uncertainties}")
+        all_max_uncertainties = list(overall_max_uncertainties.values())
+
         if metric == "average":
-            overall_metric = np.mean(
+            overall_metric = np.mean(all_max_uncertainties)
         elif metric == "maximum":
-            overall_metric = np.
+            overall_metric = np.nanmax(all_max_uncertainties)
         else:
             raise ValueError(f"Unsupported metric: {metric}")
 
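
Taken together, the hunks above restructure the evaluator around a metric_results dictionary. An illustrative end-to-end sketch of how the renamed class might be driven after this release; the import path follows the renamed module, the metrics keys mirror those accessed in calculate_metrics ("energy_range", "target_uncertainty"), and the file path is a placeholder:

from simtools.production_configuration.calculate_statistical_uncertainties_grid_point import (
    StatisticalUncertaintyEvaluator,
)

# Assumed metrics configuration; keys mirror those read in calculate_metrics().
metrics = {
    "uncertainty_effective_area": {
        "energy_range": {"value": [0.1, 100.0], "unit": "TeV"},
        "target_uncertainty": {"value": 0.05},
    },
    "energy_estimate": {"target_uncertainty": {"value": 0.1}},
}

evaluator = StatisticalUncertaintyEvaluator(
    file_path="path/to/dl2_mc_events.fits",  # placeholder DL2 MC event file
    metrics=metrics,
)
evaluator.calculate_metrics()
print(evaluator.metric_results["uncertainty_effective_area"]["max_error"])
print(evaluator.calculate_overall_metric(metric="maximum"))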