gammasimtools 0.8.2__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. The information is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (122)
  1. {gammasimtools-0.8.2.dist-info → gammasimtools-0.10.0.dist-info}/METADATA +4 -4
  2. {gammasimtools-0.8.2.dist-info → gammasimtools-0.10.0.dist-info}/RECORD +119 -105
  3. {gammasimtools-0.8.2.dist-info → gammasimtools-0.10.0.dist-info}/WHEEL +1 -1
  4. {gammasimtools-0.8.2.dist-info → gammasimtools-0.10.0.dist-info}/entry_points.txt +4 -1
  5. simtools/_version.py +2 -2
  6. simtools/applications/calculate_trigger_rate.py +15 -38
  7. simtools/applications/convert_all_model_parameters_from_simtel.py +9 -28
  8. simtools/applications/convert_geo_coordinates_of_array_elements.py +54 -53
  9. simtools/applications/convert_model_parameter_from_simtel.py +2 -2
  10. simtools/applications/db_add_file_to_db.py +1 -2
  11. simtools/applications/db_add_simulation_model_from_repository_to_db.py +110 -0
  12. simtools/applications/db_add_value_from_json_to_db.py +2 -11
  13. simtools/applications/db_development_tools/write_array_elements_positions_to_repository.py +6 -6
  14. simtools/applications/db_get_array_layouts_from_db.py +3 -1
  15. simtools/applications/db_get_file_from_db.py +11 -12
  16. simtools/applications/db_get_parameter_from_db.py +44 -32
  17. simtools/applications/derive_mirror_rnda.py +10 -1
  18. simtools/applications/derive_photon_electron_spectrum.py +99 -0
  19. simtools/applications/derive_psf_parameters.py +1 -1
  20. simtools/applications/generate_array_config.py +18 -22
  21. simtools/applications/generate_regular_arrays.py +24 -21
  22. simtools/applications/generate_simtel_array_histograms.py +11 -48
  23. simtools/applications/plot_array_layout.py +3 -1
  24. simtools/applications/plot_tabular_data.py +84 -0
  25. simtools/applications/production_generate_simulation_config.py +25 -7
  26. simtools/applications/production_scale_events.py +3 -4
  27. simtools/applications/simulate_light_emission.py +2 -2
  28. simtools/applications/simulate_prod.py +25 -60
  29. simtools/applications/simulate_prod_htcondor_generator.py +95 -0
  30. simtools/applications/submit_data_from_external.py +12 -4
  31. simtools/applications/submit_model_parameter_from_external.py +8 -6
  32. simtools/applications/validate_camera_efficiency.py +3 -3
  33. simtools/applications/validate_camera_fov.py +3 -7
  34. simtools/applications/validate_cumulative_psf.py +3 -7
  35. simtools/applications/validate_file_using_schema.py +38 -24
  36. simtools/applications/validate_optics.py +3 -4
  37. simtools/{camera_efficiency.py → camera/camera_efficiency.py} +1 -4
  38. simtools/camera/single_photon_electron_spectrum.py +168 -0
  39. simtools/configuration/commandline_parser.py +14 -13
  40. simtools/configuration/configurator.py +6 -19
  41. simtools/constants.py +10 -3
  42. simtools/corsika/corsika_config.py +8 -7
  43. simtools/corsika/corsika_histograms.py +1 -1
  44. simtools/data_model/data_reader.py +0 -3
  45. simtools/data_model/metadata_collector.py +21 -4
  46. simtools/data_model/metadata_model.py +8 -111
  47. simtools/data_model/model_data_writer.py +18 -64
  48. simtools/data_model/schema.py +213 -0
  49. simtools/data_model/validate_data.py +73 -51
  50. simtools/db/db_handler.py +395 -790
  51. simtools/db/db_model_upload.py +139 -0
  52. simtools/io_operations/hdf5_handler.py +54 -24
  53. simtools/io_operations/legacy_data_handler.py +61 -0
  54. simtools/job_execution/htcondor_script_generator.py +133 -0
  55. simtools/job_execution/job_manager.py +77 -50
  56. simtools/layout/array_layout.py +33 -28
  57. simtools/model/array_model.py +13 -7
  58. simtools/model/camera.py +4 -2
  59. simtools/model/model_parameter.py +61 -63
  60. simtools/model/site_model.py +3 -3
  61. simtools/production_configuration/calculate_statistical_errors_grid_point.py +119 -144
  62. simtools/production_configuration/event_scaler.py +7 -17
  63. simtools/production_configuration/generate_simulation_config.py +5 -32
  64. simtools/production_configuration/interpolation_handler.py +8 -11
  65. simtools/ray_tracing/mirror_panel_psf.py +47 -27
  66. simtools/runners/corsika_runner.py +14 -3
  67. simtools/runners/corsika_simtel_runner.py +3 -1
  68. simtools/runners/runner_services.py +3 -3
  69. simtools/runners/simtel_runner.py +27 -8
  70. simtools/schemas/input/MST_mirror_2f_measurements.schema.yml +39 -0
  71. simtools/schemas/input/single_pe_spectrum.schema.yml +38 -0
  72. simtools/schemas/integration_tests_config.metaschema.yml +23 -3
  73. simtools/schemas/model_parameter.metaschema.yml +95 -2
  74. simtools/schemas/model_parameter_and_data_schema.metaschema.yml +2 -0
  75. simtools/schemas/model_parameters/array_element_position_utm.schema.yml +1 -1
  76. simtools/schemas/model_parameters/array_window.schema.yml +37 -0
  77. simtools/schemas/model_parameters/asum_clipping.schema.yml +0 -4
  78. simtools/schemas/model_parameters/channels_per_chip.schema.yml +1 -1
  79. simtools/schemas/model_parameters/corsika_iact_io_buffer.schema.yml +2 -2
  80. simtools/schemas/model_parameters/dsum_clipping.schema.yml +0 -2
  81. simtools/schemas/model_parameters/dsum_ignore_below.schema.yml +0 -2
  82. simtools/schemas/model_parameters/dsum_offset.schema.yml +0 -2
  83. simtools/schemas/model_parameters/dsum_pedsub.schema.yml +0 -2
  84. simtools/schemas/model_parameters/dsum_pre_clipping.schema.yml +0 -2
  85. simtools/schemas/model_parameters/dsum_prescale.schema.yml +0 -2
  86. simtools/schemas/model_parameters/dsum_presum_max.schema.yml +0 -2
  87. simtools/schemas/model_parameters/dsum_presum_shift.schema.yml +0 -2
  88. simtools/schemas/model_parameters/dsum_shaping.schema.yml +0 -2
  89. simtools/schemas/model_parameters/dsum_shaping_renormalize.schema.yml +0 -2
  90. simtools/schemas/model_parameters/dsum_threshold.schema.yml +0 -2
  91. simtools/schemas/model_parameters/dsum_zero_clip.schema.yml +0 -2
  92. simtools/schemas/model_parameters/effective_focal_length.schema.yml +31 -1
  93. simtools/schemas/model_parameters/fadc_compensate_pedestal.schema.yml +1 -1
  94. simtools/schemas/model_parameters/fadc_lg_compensate_pedestal.schema.yml +1 -1
  95. simtools/schemas/model_parameters/fadc_noise.schema.yml +3 -3
  96. simtools/schemas/model_parameters/fake_mirror_list.schema.yml +33 -0
  97. simtools/schemas/model_parameters/laser_photons.schema.yml +2 -2
  98. simtools/schemas/model_parameters/secondary_mirror_degraded_reflection.schema.yml +1 -1
  99. simtools/schemas/production_configuration_metrics.schema.yml +68 -0
  100. simtools/schemas/production_tables.schema.yml +41 -0
  101. simtools/simtel/simtel_config_writer.py +5 -6
  102. simtools/simtel/simtel_io_histogram.py +32 -67
  103. simtools/simtel/simtel_io_histograms.py +15 -30
  104. simtools/simtel/simtel_table_reader.py +410 -0
  105. simtools/simtel/simulator_array.py +2 -1
  106. simtools/simtel/simulator_camera_efficiency.py +11 -4
  107. simtools/simtel/simulator_light_emission.py +5 -3
  108. simtools/simtel/simulator_ray_tracing.py +2 -2
  109. simtools/simulator.py +80 -33
  110. simtools/testing/configuration.py +12 -8
  111. simtools/testing/helpers.py +9 -16
  112. simtools/testing/validate_output.py +152 -68
  113. simtools/utils/general.py +149 -12
  114. simtools/utils/names.py +25 -21
  115. simtools/utils/value_conversion.py +9 -1
  116. simtools/visualization/plot_tables.py +106 -0
  117. simtools/visualization/visualize.py +43 -5
  118. simtools/applications/db_add_model_parameters_from_repository_to_db.py +0 -184
  119. simtools/db/db_array_elements.py +0 -130
  120. simtools/db/db_from_repo_handler.py +0 -106
  121. {gammasimtools-0.8.2.dist-info → gammasimtools-0.10.0.dist-info}/LICENSE +0 -0
  122. {gammasimtools-0.8.2.dist-info → gammasimtools-0.10.0.dist-info}/top_level.txt +0 -0
@@ -1,13 +1,4 @@
1
- """
2
- Provides functionality to evaluate statistical uncertainties from DL2 MC event files.
3
-
4
- Classes
5
- -------
6
- StatisticalErrorEvaluator
7
- Handles error calculation for given DL2 MC event files and specified metrics.
8
-
9
-
10
- """
1
+ """Evaluate statistical uncertainties from DL2 MC event files."""
11
2
 
12
3
  import logging
13
4
 
@@ -15,7 +6,7 @@ import numpy as np
15
6
  from astropy import units as u
16
7
  from astropy.io import fits
17
8
 
18
- _logger = logging.getLogger(__name__)
9
+ __all__ = ["StatisticalErrorEvaluator"]
19
10
 
20
11
 
21
12
  class StatisticalErrorEvaluator:
@@ -41,26 +32,13 @@ class StatisticalErrorEvaluator:
41
32
  metrics: dict[str, float],
42
33
  grid_point: tuple[float, float, float, float, float] | None = None,
43
34
  ):
44
- """
45
- Init the evaluator with a DL2 MC event file, its type, and metrics to calculate.
46
-
47
- Parameters
48
- ----------
49
- file_path : str
50
- The path to the DL2 MC event file.
51
- file_type : str
52
- The type of the file ('point-like' or 'cone').
53
- metrics : dict, optional
54
- Dictionary specifying which metrics to compute and their reference values.
55
- grid_point : tuple, optional
56
- Tuple specifying the grid point (energy, azimuth, zenith, NSB, offset).
57
- """
58
- self.file_path = file_path
35
+ """Init the evaluator with a DL2 MC event file, its type, and metrics to calculate."""
36
+ self._logger = logging.getLogger(__name__)
59
37
  self.file_type = file_type
60
38
  self.metrics = metrics
61
39
  self.grid_point = grid_point
62
40
 
63
- self.data = self.load_data_from_file()
41
+ self.data = self.load_data_from_file(file_path)
64
42
 
65
43
  self.uncertainty_effective_area = None
66
44
  self.energy_estimate = None
@@ -70,7 +48,59 @@ class StatisticalErrorEvaluator:
70
48
  self.metric_results = None
71
49
  self.energy_threshold = None
72
50
 
73
- def load_data_from_file(self):
51
+ def _load_event_data(self, hdul, data_type):
52
+ """
53
+ Load data and units for the event and simulated data data.
54
+
55
+ Parameters
56
+ ----------
57
+ hdul : HDUList
58
+ The HDUList object.
59
+ data_type: str
60
+ The type of data to load ('EVENTS' or 'SIMULATED EVENTS').
61
+
62
+ Returns
63
+ -------
64
+ dict
65
+ Dictionary containing units for the event data.
66
+ """
67
+ _data = hdul[data_type].data # pylint: disable=E1101
68
+ _header = hdul[data_type].header # pylint: disable=E1101
69
+ _units = {}
70
+ for idx, col_name in enumerate(_data.columns.names, start=1):
71
+ unit_key = f"TUNIT{idx}"
72
+ if unit_key in _header:
73
+ _units[col_name] = u.Unit(_header[unit_key])
74
+ else:
75
+ _units[col_name] = None
76
+ return _data, _units
77
+
78
+ def _set_grid_point(self, events_data):
79
+ """Set azimuth/zenith angle of grid point."""
80
+ unique_azimuths = np.unique(events_data["PNT_AZ"]) * u.deg
81
+ unique_zeniths = 90 * u.deg - np.unique(events_data["PNT_ALT"]) * u.deg
82
+ if len(unique_azimuths) > 1 or len(unique_zeniths) > 1:
83
+ msg = (
84
+ f"Multiple values found for azimuth ({unique_azimuths}) "
85
+ f"zenith ({unique_zeniths})."
86
+ )
87
+ self._logger.error(msg)
88
+ raise ValueError(msg)
89
+ if self.grid_point is not None:
90
+ self._logger.warning(
91
+ f"Grid point already set to: {self.grid_point}. "
92
+ "Overwriting with new values from file."
93
+ )
94
+ self.grid_point = (
95
+ 1 * u.TeV,
96
+ unique_azimuths[0],
97
+ unique_zeniths[0],
98
+ 0,
99
+ 0 * u.deg,
100
+ )
101
+ self._logger.info(f"Grid point values: {self.grid_point}")
102
+
103
+ def load_data_from_file(self, file_path):
74
104
  """
75
105
  Load data from the DL2 MC event file and return dictionaries with units.
76
106
 
@@ -81,91 +111,24 @@ class StatisticalErrorEvaluator:
81
111
  """
82
112
  data = {}
83
113
  try:
84
- with fits.open(self.file_path) as hdul:
85
- events_data = hdul["EVENTS"].data # pylint: disable=E1101
86
- sim_events_data = hdul["SIMULATED EVENTS"].data # pylint: disable=E1101
87
- event_units = {}
88
- for idx, col_name in enumerate(events_data.columns.names, start=1):
89
- unit_key = f"TUNIT{idx}"
90
- if unit_key in hdul["EVENTS"].header: # pylint: disable=E1101
91
- event_units[col_name] = u.Unit(
92
- hdul["EVENTS"].header[unit_key] # pylint: disable=E1101
93
- )
94
- else:
95
- event_units[col_name] = None
96
-
97
- sim_units = {}
98
- for idx, col_name in enumerate(sim_events_data.columns.names, start=1):
99
- unit_key = f"TUNIT{idx}"
100
- if unit_key in hdul["SIMULATED EVENTS"].header: # pylint: disable=E1101
101
- sim_units[col_name] = u.Unit(
102
- hdul["SIMULATED EVENTS"].header[unit_key] # pylint: disable=E1101
103
- )
104
- else:
105
- sim_units[col_name] = None
106
- # dl2 files are required to have units for these entries
107
- event_energies_reco = events_data["ENERGY"] * event_units["ENERGY"]
108
-
109
- event_energies_mc = events_data["MC_ENERGY"] * event_units["MC_ENERGY"]
110
-
111
- bin_edges_low = sim_events_data["MC_ENERG_LO"] * sim_units["MC_ENERG_LO"]
112
-
113
- bin_edges_high = sim_events_data["MC_ENERG_HI"] * sim_units["MC_ENERG_HI"]
114
-
115
- simulated_event_histogram = sim_events_data["EVENTS"] * u.count
116
-
117
- viewcone = hdul[3].data["viewcone"][0][1] # pylint: disable=E1101
118
- core_range = hdul[3].data["core_range"][0][1] # pylint: disable=E1101
114
+ with fits.open(file_path) as hdul:
115
+ events_data, event_units = self._load_event_data(hdul, "EVENTS")
116
+ sim_events_data, sim_units = self._load_event_data(hdul, "SIMULATED EVENTS")
119
117
 
120
118
  data = {
121
- "event_energies_reco": event_energies_reco,
122
- "event_energies_mc": event_energies_mc,
123
- "bin_edges_low": bin_edges_low,
124
- "bin_edges_high": bin_edges_high,
125
- "simulated_event_histogram": simulated_event_histogram,
126
- "viewcone": viewcone,
127
- "core_range": core_range,
119
+ "event_energies_reco": events_data["ENERGY"] * event_units["ENERGY"],
120
+ "event_energies_mc": events_data["MC_ENERGY"] * event_units["MC_ENERGY"],
121
+ "bin_edges_low": sim_events_data["MC_ENERG_LO"] * sim_units["MC_ENERG_LO"],
122
+ "bin_edges_high": sim_events_data["MC_ENERG_HI"] * sim_units["MC_ENERG_HI"],
123
+ "simulated_event_histogram": sim_events_data["EVENTS"] * u.count,
124
+ "viewcone": hdul[3].data["viewcone"][0][1], # pylint: disable=E1101
125
+ "core_range": hdul[3].data["core_range"][0][1], # pylint: disable=E1101
128
126
  }
129
- unique_azimuths = np.unique(events_data["PNT_AZ"]) * u.deg
130
- unique_zeniths = 90 * u.deg - np.unique(events_data["PNT_ALT"]) * u.deg
131
- if self.grid_point is None:
132
- _logger.info(f"Unique azimuths: {unique_azimuths}")
133
- _logger.info(f"Unique zeniths: {unique_zeniths}")
134
-
135
- if len(unique_azimuths) == 1 and len(unique_zeniths) == 1:
136
- _logger.info(
137
- f"Setting initial grid point with azimuth: {unique_azimuths[0]}"
138
- f" zenith: {unique_zeniths[0]}"
139
- )
140
- self.grid_point = (
141
- 1 * u.TeV,
142
- unique_azimuths[0],
143
- unique_zeniths[0],
144
- 0,
145
- 0 * u.deg,
146
- ) # Initialize grid point with azimuth and zenith
147
- else:
148
- msg = "Multiple unique values found for azimuth or zenith."
149
- _logger.error(msg)
150
- raise ValueError(msg)
151
- else:
152
- _logger.warning(
153
- f"Grid point already set to: {self.grid_point}. "
154
- "Overwriting with new values from file."
155
- )
156
-
157
- self.grid_point = (
158
- 1 * u.TeV,
159
- unique_azimuths[0],
160
- unique_zeniths[0],
161
- 0,
162
- 0 * u.deg,
163
- )
164
- _logger.info(f"New grid point values: {self.grid_point}")
127
+ self._set_grid_point(events_data)
165
128
 
166
129
  except FileNotFoundError as e:
167
- error_message = f"Error loading file {self.file_path}: {e}"
168
- _logger.error(error_message)
130
+ error_message = f"Error loading file {file_path}: {e}"
131
+ self._logger.error(error_message)
169
132
  raise FileNotFoundError(error_message) from e
170
133
  return data
171
134
 
@@ -183,9 +146,9 @@ class StatisticalErrorEvaluator:
183
146
  bin_edges = np.concatenate([bin_edges_low, [bin_edges_high[-1]]])
184
147
  return np.unique(bin_edges)
185
148
 
186
- def compute_triggered_event_histogram(self, event_energies_reco, bin_edges):
149
+ def compute_reconstructed_event_histogram(self, event_energies_reco, bin_edges):
187
150
  """
188
- Compute histogram for triggered events.
151
+ Compute histogram of events as function of reconstructed energy.
189
152
 
190
153
  Parameters
191
154
  ----------
@@ -196,24 +159,26 @@ class StatisticalErrorEvaluator:
196
159
 
197
160
  Returns
198
161
  -------
199
- triggered_event_histogram : array
200
- Histogram of triggered events.
162
+ reconstructed_event_histogram : array
163
+ Histogram of reconstructed events.
201
164
  """
202
165
  event_energies_reco = event_energies_reco.to(bin_edges.unit)
203
166
 
204
- triggered_event_histogram, _ = np.histogram(event_energies_reco.value, bins=bin_edges.value)
205
- return triggered_event_histogram * u.count
167
+ reconstructed_event_histogram, _ = np.histogram(
168
+ event_energies_reco.value, bins=bin_edges.value
169
+ )
170
+ return reconstructed_event_histogram * u.count
206
171
 
207
- def compute_efficiency_and_errors(self, triggered_event_counts, simulated_event_counts):
172
+ def compute_efficiency_and_errors(self, reconstructed_event_counts, simulated_event_counts):
208
173
  """
209
- Compute trigger efficiency and its statistical error using the binomial distribution.
174
+ Compute reconstructed event efficiency and its uncertainty assuming binomial distribution.
210
175
 
211
176
  Parameters
212
177
  ----------
213
- triggered_event_counts : array with units
214
- Histogram counts of the triggered events.
178
+ reconstructed_event_counts : array with units
179
+ Histogram counts of reconstructed events.
215
180
  simulated_event_counts : array with units
216
- Histogram counts of the simulated events.
181
+ Histogram counts of simulated events.
217
182
 
218
183
  Returns
219
184
  -------
@@ -223,32 +188,39 @@ class StatisticalErrorEvaluator:
223
188
  Array of relative uncertainties.
224
189
  """
225
190
  # Ensure the inputs have compatible units
226
- triggered_event_counts = triggered_event_counts.to(u.ct)
227
- simulated_event_counts = simulated_event_counts.to(u.ct)
191
+ reconstructed_event_counts = (
192
+ reconstructed_event_counts.to(u.ct)
193
+ if isinstance(reconstructed_event_counts, u.Quantity)
194
+ else reconstructed_event_counts * u.ct
195
+ )
196
+ simulated_event_counts = (
197
+ simulated_event_counts.to(u.ct)
198
+ if isinstance(simulated_event_counts, u.Quantity)
199
+ else simulated_event_counts * u.ct
200
+ )
201
+
202
+ if np.any(reconstructed_event_counts > simulated_event_counts):
203
+ raise ValueError("Reconstructed event counts exceed simulated event counts.")
228
204
 
229
205
  # Compute efficiencies, ensuring the output is dimensionless
230
206
  efficiencies = np.divide(
231
- triggered_event_counts,
207
+ reconstructed_event_counts,
232
208
  simulated_event_counts,
233
- out=np.zeros_like(triggered_event_counts),
209
+ out=np.zeros_like(reconstructed_event_counts),
234
210
  where=simulated_event_counts > 0,
235
211
  ).to(u.dimensionless_unscaled)
236
212
 
237
213
  # Set up a mask for valid data with a unit-consistent threshold
238
- if np.any(triggered_event_counts > simulated_event_counts):
239
- raise ValueError(
240
- "Triggered event counts exceed simulated event counts. Please check input data."
241
- )
242
- valid = (simulated_event_counts > 0 * u.ct) & (triggered_event_counts > 0 * u.ct)
214
+ valid = (simulated_event_counts > 0) & (reconstructed_event_counts > 0)
243
215
 
244
- uncertainties = np.zeros_like(triggered_event_counts.value) * u.dimensionless_unscaled
216
+ uncertainties = np.zeros_like(reconstructed_event_counts.value) * u.dimensionless_unscaled
245
217
 
246
218
  if np.any(valid):
247
219
  uncertainties[valid] = np.sqrt(
248
220
  np.maximum(
249
221
  simulated_event_counts[valid]
250
- / triggered_event_counts[valid]
251
- * (1 - triggered_event_counts[valid] / simulated_event_counts[valid]),
222
+ / reconstructed_event_counts[valid]
223
+ * (1 - reconstructed_event_counts[valid] / simulated_event_counts[valid]),
252
224
  0,
253
225
  )
254
226
  )
@@ -273,13 +245,13 @@ class StatisticalErrorEvaluator:
273
245
  Energy threshold value.
274
246
  """
275
247
  bin_edges = self.create_bin_edges()
276
- triggered_event_histogram = self.compute_triggered_event_histogram(
248
+ reconstructed_event_histogram = self.compute_reconstructed_event_histogram(
277
249
  self.data["event_energies_mc"], bin_edges
278
250
  )
279
251
  simulated_event_histogram = self.data["simulated_event_histogram"]
280
252
 
281
253
  efficiencies, _ = self.compute_efficiency_and_errors(
282
- triggered_event_histogram, simulated_event_histogram
254
+ reconstructed_event_histogram, simulated_event_histogram
283
255
  )
284
256
 
285
257
  # Determine the effective area threshold (10% of max effective area)
@@ -302,12 +274,12 @@ class StatisticalErrorEvaluator:
302
274
  Dictionary with uncertainties for the file.
303
275
  """
304
276
  bin_edges = self.create_bin_edges()
305
- triggered_event_histogram = self.compute_triggered_event_histogram(
277
+ reconstructed_event_histogram = self.compute_reconstructed_event_histogram(
306
278
  self.data["event_energies_mc"], bin_edges
307
279
  )
308
280
  simulated_event_histogram = self.data["simulated_event_histogram"]
309
281
  _, relative_errors = self.compute_efficiency_and_errors(
310
- triggered_event_histogram, simulated_event_histogram
282
+ reconstructed_event_histogram, simulated_event_histogram
311
283
  )
312
284
  return {"relative_errors": relative_errors}
313
285
 
@@ -326,7 +298,10 @@ class StatisticalErrorEvaluator:
326
298
  event_energies_mc = self.data["event_energies_mc"]
327
299
 
328
300
  if len(event_energies_reco) != len(event_energies_mc):
329
- raise ValueError(f"Mismatch in the number of energies for file {self.file_path}")
301
+ raise ValueError(
302
+ f"Mismatch in the number of energies: {len(event_energies_reco)} vs "
303
+ f"{len(event_energies_mc)}"
304
+ )
330
305
 
331
306
  energy_deviation = (event_energies_reco - event_energies_mc) / event_energies_mc
332
307
 
@@ -354,12 +329,12 @@ class StatisticalErrorEvaluator:
354
329
 
355
330
  self.uncertainty_effective_area = self.calculate_uncertainty_effective_area()
356
331
  if self.uncertainty_effective_area:
357
- validity_range = self.metrics.get("uncertainty_effective_area", {}).get(
358
- "valid_range"
332
+ energy_range = self.metrics.get("uncertainty_effective_area", {}).get(
333
+ "energy_range"
359
334
  )
360
- min_energy, max_energy = validity_range["value"][0] * u.Unit(
361
- validity_range["unit"]
362
- ), validity_range["value"][1] * u.Unit(validity_range["unit"])
335
+ min_energy, max_energy = energy_range["value"][0] * u.Unit(
336
+ energy_range["unit"]
337
+ ), energy_range["value"][1] * u.Unit(energy_range["unit"])
363
338
 
364
339
  valid_errors = [
365
340
  error
@@ -375,7 +350,7 @@ class StatisticalErrorEvaluator:
375
350
  ref_value = self.metrics.get("uncertainty_effective_area", {}).get("target_error")[
376
351
  "value"
377
352
  ]
378
- _logger.info(
353
+ self._logger.info(
379
354
  f"Effective Area Error (max in validity range): "
380
355
  f"{self.uncertainty_effective_area['max_error'].value:.6f}, "
381
356
  f"Reference: {ref_value:.3f}"
@@ -386,7 +361,7 @@ class StatisticalErrorEvaluator:
386
361
  self.calculate_energy_estimate()
387
362
  )
388
363
  ref_value = self.metrics.get("energy_estimate", {}).get("target_error")["value"]
389
- _logger.info(
364
+ self._logger.info(
390
365
  f"Energy Estimate Error: {self.energy_estimate:.3f}, Reference: {ref_value:.3f}"
391
366
  )
392
367
  else:
@@ -442,7 +417,7 @@ class StatisticalErrorEvaluator:
442
417
  overall_max_errors[metric_name] = result
443
418
  else:
444
419
  raise ValueError(f"Unsupported result type for {metric_name}: {type(result)}")
445
- _logger.info(f"overall_max_errors {overall_max_errors}")
420
+ self._logger.info(f"overall_max_errors {overall_max_errors}")
446
421
  all_max_errors = list(overall_max_errors.values())
447
422
  if metric == "average":
448
423
  overall_metric = np.mean(all_max_errors)
@@ -6,16 +6,10 @@ which scales the number of events for both the entire dataset and specific grid
6
6
  Scaling factors are calculated using error metrics and the evaluator's results.
7
7
  """
8
8
 
9
- import logging
10
-
11
9
  import astropy.units as u
12
10
  import numpy as np
13
11
 
14
- from simtools.production_configuration.calculate_statistical_errors_grid_point import (
15
- StatisticalErrorEvaluator,
16
- )
17
-
18
- _logger = logging.getLogger(__name__)
12
+ __all__ = ["EventScaler"]
19
13
 
20
14
 
21
15
  class EventScaler:
@@ -25,7 +19,7 @@ class EventScaler:
25
19
  Supports scaling both the entire dataset and specific grid points like energy values.
26
20
  """
27
21
 
28
- def __init__(self, evaluator: StatisticalErrorEvaluator, science_case: str, metrics: dict):
22
+ def __init__(self, evaluator, science_case: str, metrics: dict):
29
23
  """
30
24
  Initialize the EventScaler with the evaluator, science case, and metrics.
31
25
 
@@ -46,22 +40,18 @@ class EventScaler:
46
40
  """
47
41
  Calculate the scaled number of events based on statistical error metrics.
48
42
 
49
- If `return_sum` is `True`, the method returns the sum of scaled events for the entire
50
- dataset. If `return_sum` is `False`, it returns the scaled number of events for each
51
- grid point (e.g., along the energy axis).
52
-
53
43
  Parameters
54
44
  ----------
55
45
  return_sum : bool, optional
56
- If `True`, returns the sum of scaled events for the entire dataset. If `False`,
57
- returns the scaled events for each grid point along the energy axis. Default is `True`.
46
+ If True, returns the sum of scaled events for the entire set of MC events. If False,
47
+ returns the scaled events for each grid point along the energy axis. Default is True.
58
48
 
59
49
  Returns
60
50
  -------
61
51
  u.Quantity
62
- If `return_sum` is `True`, returns the total scaled number of events as a `u.Quantity`.
63
- If `return_sum` is `False`, returns an array of scaled events along the energy axis as
64
- a `u.Quantity`.
52
+ If 'return_sum' is True, returns the total scaled number of events as a u.Quantity.
53
+ If 'return_sum' is False, returns an array of scaled events along the energy axis as
54
+ a u.Quantity.
65
55
  """
66
56
  scaling_factor = self._compute_scaling_factor()
67
57
 
@@ -1,33 +1,11 @@
1
- """
2
- Configures and generates simulation parameters for a specific grid point.
3
-
4
- Used to configure and generate simulation parameters for a specific grid point
5
- based on statistical uncertainties.
6
- The class considers parameters, such as azimuth, elevation, and night sky background,
7
- to compute core scatter area, viewcone, and the required number of simulated events.
8
-
9
- Key Components:
10
- ---------------
11
- - `SimulationConfig`: Main class to handle simulation configuration for a grid point.
12
- - Attributes:
13
- - `grid_point` (dict): Contains azimuth, elevation, and night sky background.
14
- - `ctao_data_level` (str): The data level for the simulation (e.g., 'A', 'B', 'C').
15
- - `science_case` (str): The science case for the simulation.
16
- - `file_path` (str): Path to the DL2 MC event file
17
- used for statistical error evaluation.
18
- - `file_type` (str): Type of the DL2 MC event file ('point-like' or 'cone').
19
- - `metrics` (dict, optional): Dictionary of metrics to evaluate.
20
-
21
- """
22
-
23
- import logging
1
+ """Derives simulation configuration parameters for a grid point based on several metrics."""
24
2
 
25
3
  from simtools.production_configuration.calculate_statistical_errors_grid_point import (
26
4
  StatisticalErrorEvaluator,
27
5
  )
28
6
  from simtools.production_configuration.event_scaler import EventScaler
29
7
 
30
- _logger = logging.getLogger(__name__)
8
+ __all__ = ["SimulationConfig"]
31
9
 
32
10
 
33
11
  class SimulationConfig:
@@ -80,16 +58,11 @@ class SimulationConfig:
80
58
  A dictionary with simulation parameters such as core scatter area,
81
59
  viewcone, and number of simulated events.
82
60
  """
83
- core_scatter_area = self._calculate_core_scatter_area()
84
- viewcone = self._calculate_viewcone()
85
- number_of_events = self.calculate_required_events()
86
-
87
61
  self.simulation_params = {
88
- "core_scatter_area": core_scatter_area,
89
- "viewcone": viewcone,
90
- "number_of_events": number_of_events,
62
+ "core_scatter_area": self._calculate_core_scatter_area(),
63
+ "viewcone": self._calculate_viewcone(),
64
+ "number_of_events": self.calculate_required_events(),
91
65
  }
92
-
93
66
  return self.simulation_params
94
67
 
95
68
  def calculate_required_events(self) -> int:
@@ -4,18 +4,15 @@ import astropy.units as u
4
4
  import numpy as np
5
5
  from scipy.interpolate import griddata
6
6
 
7
- from simtools.production_configuration.calculate_statistical_errors_grid_point import (
8
- StatisticalErrorEvaluator,
9
- )
10
7
  from simtools.production_configuration.event_scaler import EventScaler
11
8
 
9
+ __all__ = ["InterpolationHandler"]
10
+
12
11
 
13
12
  class InterpolationHandler:
14
13
  """Handle interpolation between multiple StatisticalErrorEvaluator instances."""
15
14
 
16
- def __init__(
17
- self, evaluators: list["StatisticalErrorEvaluator"], science_case: str, metrics: dict
18
- ):
15
+ def __init__(self, evaluators, science_case: str, metrics: dict):
19
16
  self.evaluators = evaluators
20
17
  self.science_case = science_case
21
18
  self.metrics = metrics
@@ -157,9 +154,9 @@ class InterpolationHandler:
157
154
 
158
155
  return interpolated_threshold.item()
159
156
 
160
- def plot_comparison(self, evaluator: "StatisticalErrorEvaluator"):
157
+ def plot_comparison(self, evaluator):
161
158
  """
162
- Plot a comparison between the simulated, scaled, and triggered events.
159
+ Plot a comparison between the simulated, scaled, and reconstructed events.
163
160
 
164
161
  Parameters
165
162
  ----------
@@ -184,14 +181,14 @@ class InterpolationHandler:
184
181
 
185
182
  plt.plot(midpoints, evaluator.scaled_events, label="Scaled")
186
183
 
187
- triggered_event_histogram, _ = np.histogram(
184
+ reconstructed_event_histogram, _ = np.histogram(
188
185
  evaluator.data["event_energies_reco"], bins=evaluator.data["bin_edges_low"]
189
186
  )
190
- plt.plot(midpoints[:-1], triggered_event_histogram, label="Triggered")
187
+ plt.plot(midpoints[:-1], reconstructed_event_histogram, label="Reconstructed")
191
188
 
192
189
  plt.legend()
193
190
  plt.xscale("log")
194
191
  plt.xlabel("Energy (Midpoint of Bin Edges)")
195
192
  plt.ylabel("Event Count")
196
- plt.title("Comparison of Simulated, Scaled, and Triggered Events")
193
+ plt.title("Comparison of Simulated, scaled, and reconstructed events")
197
194
  plt.show()
@@ -138,43 +138,63 @@ class MirrorPanelPSF:
138
138
  self.rnda_opt, save_figures=save_figures
139
139
  )
140
140
 
141
- def _optimize_reflection_angle(self, step_size=0.1):
142
- """Optimize the random reflection angle to minimize the difference in D80 containment."""
141
+ def _optimize_reflection_angle(self, step_size=0.1, max_iteration=100):
142
+ """
143
+ Optimize the random reflection angle to minimize the difference in D80 containment.
144
+
145
+ Parameters
146
+ ----------
147
+ step_size: float
148
+ Initial step size for optimization.
149
+ max_iteration: int
150
+ Maximum number of iterations.
151
+
152
+ Raises
153
+ ------
154
+ ValueError
155
+ If the optimization reaches the maximum number of iterations without converging.
156
+
157
+ """
158
+ relative_tolerance_d80 = self.args_dict["rtol_psf_containment"]
159
+ self._logger.info(
160
+ "Optimizing random reflection angle "
161
+ f"(relative tolerance = {relative_tolerance_d80}, "
162
+ f"step size = {step_size}, max iteration = {max_iteration})"
163
+ )
143
164
 
144
165
  def collect_results(rnda, mean, sig):
145
166
  self.results_rnda.append(rnda)
146
167
  self.results_mean.append(mean)
147
168
  self.results_sig.append(sig)
148
169
 
149
- stop = False
150
- mean_d80, sig_d80 = self.run_simulations_and_analysis(self.rnda_start)
170
+ reference_d80 = self.args_dict["psf_measurement_containment_mean"]
151
171
  rnda = self.rnda_start
152
- sign_delta = np.sign(mean_d80 - self.args_dict["psf_measurement_containment_mean"])
153
- collect_results(rnda, mean_d80, sig_d80)
154
- while not stop:
155
- rnda = rnda - (step_size * self.rnda_start * sign_delta)
156
- if rnda < 0:
157
- rnda = 0
158
- collect_results(rnda, mean_d80, sig_d80)
159
- break
172
+ prev_error_d80 = float("inf")
173
+ iteration = 0
174
+
175
+ while True:
160
176
  mean_d80, sig_d80 = self.run_simulations_and_analysis(rnda)
161
- new_sign_delta = np.sign(mean_d80 - self.args_dict["psf_measurement_containment_mean"])
162
- stop = new_sign_delta != sign_delta
163
- sign_delta = new_sign_delta
177
+ error_d80 = abs(1 - mean_d80 / reference_d80)
164
178
  collect_results(rnda, mean_d80, sig_d80)
165
179
 
166
- self._interpolate_optimal_rnda()
180
+ if error_d80 < relative_tolerance_d80:
181
+ break
182
+
183
+ if mean_d80 < reference_d80:
184
+ rnda += step_size * self.rnda_start
185
+ else:
186
+ rnda -= step_size * self.rnda_start
167
187
 
168
- def _interpolate_optimal_rnda(self):
169
- """Interpolate to find the optimal random reflection angle."""
170
- self.results_rnda, self.results_mean, self.results_sig = gen.sort_arrays(
171
- self.results_rnda, self.results_mean, self.results_sig
172
- )
173
- self.rnda_opt = np.interp(
174
- x=self.args_dict["psf_measurement_containment_mean"],
175
- xp=self.results_mean,
176
- fp=self.results_rnda,
177
- )
188
+ if error_d80 >= prev_error_d80:
189
+ step_size = step_size / 2
190
+ prev_error_d80 = error_d80
191
+ iteration += 1
192
+ if iteration > max_iteration:
193
+ raise ValueError(
194
+ f"Maximum iterations ({max_iteration}) reached without convergence."
195
+ )
196
+
197
+ self.rnda_opt = rnda
178
198
 
179
199
  def _get_starting_value(self):
180
200
  """Get optimization starting value from command line or previous model."""
@@ -275,6 +295,6 @@ class MirrorPanelPSF:
275
295
  )
276
296
  writer.ModelDataWriter.dump(
277
297
  args_dict=self.args_dict,
278
- metadata=MetadataCollector(args_dict=self.args_dict).top_level_meta,
298
+ metadata=MetadataCollector(args_dict=self.args_dict).get_top_level_metadata(),
279
299
  product_data=result_table,
280
300
  )