gammasimtools 0.16.0__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. {gammasimtools-0.16.0.dist-info → gammasimtools-0.18.0.dist-info}/METADATA +5 -2
  2. {gammasimtools-0.16.0.dist-info → gammasimtools-0.18.0.dist-info}/RECORD +82 -74
  3. {gammasimtools-0.16.0.dist-info → gammasimtools-0.18.0.dist-info}/WHEEL +1 -1
  4. {gammasimtools-0.16.0.dist-info → gammasimtools-0.18.0.dist-info}/entry_points.txt +4 -1
  5. simtools/_version.py +2 -2
  6. simtools/applications/db_add_simulation_model_from_repository_to_db.py +10 -1
  7. simtools/applications/derive_ctao_array_layouts.py +5 -5
  8. simtools/applications/derive_mirror_rnda.py +1 -1
  9. simtools/applications/generate_simtel_event_data.py +128 -46
  10. simtools/applications/merge_tables.py +102 -0
  11. simtools/applications/plot_array_layout.py +145 -258
  12. simtools/applications/plot_tabular_data.py +12 -1
  13. simtools/applications/plot_tabular_data_for_model_parameter.py +103 -0
  14. simtools/applications/production_derive_corsika_limits.py +78 -225
  15. simtools/applications/production_derive_statistics.py +77 -43
  16. simtools/applications/simulate_light_emission.py +1 -0
  17. simtools/applications/simulate_prod.py +30 -18
  18. simtools/applications/simulate_prod_htcondor_generator.py +0 -1
  19. simtools/applications/submit_array_layouts.py +93 -0
  20. simtools/applications/verify_simulation_model_production_tables.py +52 -0
  21. simtools/camera/camera_efficiency.py +3 -3
  22. simtools/configuration/commandline_parser.py +30 -35
  23. simtools/configuration/configurator.py +0 -4
  24. simtools/constants.py +2 -0
  25. simtools/corsika/corsika_config.py +17 -12
  26. simtools/corsika/primary_particle.py +46 -13
  27. simtools/data_model/metadata_collector.py +7 -3
  28. simtools/data_model/schema.py +15 -1
  29. simtools/db/db_handler.py +16 -11
  30. simtools/db/db_model_upload.py +2 -2
  31. simtools/io_operations/io_handler.py +2 -2
  32. simtools/io_operations/io_table_handler.py +345 -0
  33. simtools/job_execution/htcondor_script_generator.py +2 -2
  34. simtools/job_execution/job_manager.py +7 -121
  35. simtools/layout/array_layout_utils.py +389 -0
  36. simtools/model/array_model.py +10 -1
  37. simtools/model/model_repository.py +134 -0
  38. simtools/production_configuration/{calculate_statistical_errors_grid_point.py → calculate_statistical_uncertainties_grid_point.py} +101 -112
  39. simtools/production_configuration/derive_corsika_limits.py +239 -111
  40. simtools/production_configuration/derive_corsika_limits_grid.py +232 -0
  41. simtools/production_configuration/derive_production_statistics.py +57 -26
  42. simtools/production_configuration/derive_production_statistics_handler.py +70 -37
  43. simtools/production_configuration/interpolation_handler.py +296 -94
  44. simtools/ray_tracing/ray_tracing.py +7 -6
  45. simtools/reporting/docs_read_parameters.py +104 -62
  46. simtools/resources/array-element-ids.json +126 -0
  47. simtools/runners/corsika_simtel_runner.py +4 -1
  48. simtools/runners/runner_services.py +5 -4
  49. simtools/schemas/model_parameter_and_data_schema.metaschema.yml +5 -1
  50. simtools/schemas/model_parameters/atmospheric_profile.schema.yml +41 -0
  51. simtools/schemas/model_parameters/atmospheric_transmission.schema.yml +43 -0
  52. simtools/schemas/model_parameters/camera_filter.schema.yml +10 -0
  53. simtools/schemas/model_parameters/camera_filter_incidence_angle.schema.yml +10 -0
  54. simtools/schemas/model_parameters/discriminator_pulse_shape.schema.yml +31 -0
  55. simtools/schemas/model_parameters/dsum_threshold.schema.yml +41 -0
  56. simtools/schemas/model_parameters/fadc_pulse_shape.schema.yml +12 -0
  57. simtools/schemas/model_parameters/lightguide_efficiency_vs_incidence_angle.schema.yml +10 -0
  58. simtools/schemas/model_parameters/mirror_reflectivity.schema.yml +10 -0
  59. simtools/schemas/model_parameters/nsb_reference_spectrum.schema.yml +12 -0
  60. simtools/schemas/model_parameters/pm_photoelectron_spectrum.schema.yml +19 -0
  61. simtools/schemas/model_parameters/quantum_efficiency.schema.yml +10 -0
  62. simtools/schemas/plot_configuration.metaschema.yml +46 -57
  63. simtools/schemas/production_configuration_metrics.schema.yml +2 -2
  64. simtools/simtel/simtel_config_writer.py +34 -14
  65. simtools/simtel/simtel_io_event_reader.py +301 -194
  66. simtools/simtel/simtel_io_event_writer.py +237 -221
  67. simtools/simtel/simtel_io_file_info.py +9 -4
  68. simtools/simtel/simtel_io_metadata.py +119 -8
  69. simtools/simtel/simulator_array.py +2 -2
  70. simtools/simtel/simulator_light_emission.py +79 -34
  71. simtools/simtel/simulator_ray_tracing.py +2 -2
  72. simtools/simulator.py +101 -68
  73. simtools/testing/validate_output.py +4 -1
  74. simtools/utils/general.py +1 -3
  75. simtools/utils/names.py +76 -7
  76. simtools/visualization/plot_array_layout.py +242 -0
  77. simtools/visualization/plot_pixels.py +680 -0
  78. simtools/visualization/plot_tables.py +81 -2
  79. simtools/visualization/visualize.py +3 -219
  80. simtools/applications/production_generate_simulation_config.py +0 -152
  81. simtools/layout/ctao_array_layouts.py +0 -172
  82. simtools/production_configuration/generate_simulation_config.py +0 -158
  83. {gammasimtools-0.16.0.dist-info → gammasimtools-0.18.0.dist-info}/licenses/LICENSE +0 -0
  84. {gammasimtools-0.16.0.dist-info → gammasimtools-0.18.0.dist-info}/top_level.txt +0 -0
  85. /simtools/{schemas → resources}/array_elements.yml +0 -0
@@ -0,0 +1,232 @@
1
+ """Derive CORSIKA limits for a grid of parameters."""
2
+
3
+ import datetime
4
+ import logging
5
+
6
+ import numpy as np
7
+ from astropy.table import Column, Table
8
+
9
+ import simtools.utils.general as gen
10
+ from simtools.data_model.metadata_collector import MetadataCollector
11
+ from simtools.io_operations import io_handler
12
+ from simtools.model.site_model import SiteModel
13
+ from simtools.production_configuration.derive_corsika_limits import LimitCalculator
14
+
15
+ _logger = logging.getLogger(__name__)
16
+
17
+
18
+ def generate_corsika_limits_grid(args_dict, db_config=None):
19
+ """
20
+ Generate CORSIKA limits for a grid of parameters.
21
+
22
+ Requires at least one event data file per parameter set.
23
+
24
+ Parameters
25
+ ----------
26
+ args_dict : dict
27
+ Dictionary containing command line arguments.
28
+ db_config : dict, optional
29
+ Database configuration dictionary.
30
+ """
31
+ event_data_files = gen.get_list_of_files_from_command_line(
32
+ args_dict["event_data_files"], [".hdf5", ".gz"]
33
+ ) # accept fits.gz files (.gz)
34
+ if args_dict.get("array_layout_name"):
35
+ telescope_configs = _read_array_layouts_from_db(
36
+ args_dict["array_layout_name"],
37
+ args_dict.get("site"),
38
+ args_dict.get("model_version"),
39
+ db_config,
40
+ )
41
+ else:
42
+ telescope_configs = gen.collect_data_from_file(args_dict["telescope_ids"])[
43
+ "telescope_configs"
44
+ ]
45
+
46
+ results = []
47
+ for file_path in event_data_files:
48
+ for array_name, telescope_ids in telescope_configs.items():
49
+ _logger.info(f"Processing file: {file_path} with telescope config: {array_name}")
50
+ result = _process_file(
51
+ file_path,
52
+ array_name,
53
+ telescope_ids,
54
+ args_dict["loss_fraction"],
55
+ args_dict["plot_histograms"],
56
+ )
57
+ result["layout"] = array_name
58
+ results.append(result)
59
+
60
+ write_results(results, args_dict)
61
+
62
+
63
+ def _process_file(file_path, array_name, telescope_ids, loss_fraction, plot_histograms):
64
+ """
65
+ Compute limits for a single file.
66
+
67
+ Parameters
68
+ ----------
69
+ file_path : str
70
+ Path to the event data file.
71
+ array_name : str
72
+ Name of the telescope array configuration.
73
+ telescope_ids : list[int]
74
+ List of telescope IDs to filter the events.
75
+ loss_fraction : float
76
+ Fraction of events to be lost.
77
+ plot_histograms : bool
78
+ Whether to plot histograms.
79
+
80
+ Returns
81
+ -------
82
+ dict
83
+ Dictionary containing the computed limits and metadata.
84
+ """
85
+ calculator = LimitCalculator(file_path, array_name=array_name, telescope_list=telescope_ids)
86
+ limits = calculator.compute_limits(loss_fraction)
87
+
88
+ if plot_histograms:
89
+ calculator.plot_data(io_handler.IOHandler().get_output_directory())
90
+
91
+ return limits
92
+
93
+
94
+ def write_results(results, args_dict):
95
+ """
96
+ Write the computed limits as astropy table to file.
97
+
98
+ Parameters
99
+ ----------
100
+ results : list[dict]
101
+ List of computed limits.
102
+ args_dict : dict
103
+ Dictionary containing command line arguments.
104
+ """
105
+ table = _create_results_table(results, args_dict["loss_fraction"])
106
+
107
+ output_dir = io_handler.IOHandler().get_output_directory("corsika_limits")
108
+ output_file = output_dir / args_dict["output_file"]
109
+
110
+ table.write(output_file, format="ascii.ecsv", overwrite=True)
111
+ _logger.info(f"Results saved to {output_file}")
112
+
113
+ MetadataCollector.dump(args_dict, output_file)
114
+
115
+
116
+ def _create_results_table(results, loss_fraction):
117
+ """
118
+ Convert list of simulation results to an astropy Table with metadata.
119
+
120
+ Round values to appropriate precision and add metadata.
121
+
122
+ Parameters
123
+ ----------
124
+ results : list[dict]
125
+ Computed limits per file and telescope configuration.
126
+ loss_fraction : float
127
+ Fraction of lost events (added to metadata).
128
+
129
+ Returns
130
+ -------
131
+ astropy.table.Table
132
+ Table with computed limits.
133
+ """
134
+ cols = [
135
+ "primary_particle",
136
+ "array_name",
137
+ "telescope_ids",
138
+ "zenith",
139
+ "azimuth",
140
+ "nsb_level",
141
+ "lower_energy_limit",
142
+ "upper_radius_limit",
143
+ "viewcone_radius",
144
+ ]
145
+
146
+ columns = {name: [] for name in cols}
147
+ units = {}
148
+
149
+ for res in results:
150
+ _process_result_row(res, cols, columns, units)
151
+
152
+ table_cols = _create_table_columns(cols, columns, units)
153
+ table = Table(table_cols)
154
+
155
+ table.meta.update(
156
+ {
157
+ "created": datetime.datetime.now().isoformat(),
158
+ "description": "Lookup table for CORSIKA limits computed from simulations.",
159
+ "loss_fraction": loss_fraction,
160
+ }
161
+ )
162
+
163
+ return table
164
+
165
+
166
+ def _process_result_row(res, cols, columns, units):
167
+ """Process a single result row and add values to columns."""
168
+ for k in cols:
169
+ val = res.get(k, None)
170
+ if val is not None:
171
+ val = _round_value(k, val)
172
+ _logger.debug(f"Adding {k}: {val} to column data")
173
+
174
+ if hasattr(val, "unit"):
175
+ columns[k].append(val.value)
176
+ units[k] = val.unit
177
+ else:
178
+ columns[k].append(val)
179
+ if k not in units:
180
+ units[k] = None
181
+
182
+
183
+ def _round_value(key, val):
184
+ """Round value based on key type."""
185
+ if key == "lower_energy_limit":
186
+ return np.floor(val * 1e3) / 1e3
187
+ if key == "upper_radius_limit":
188
+ return np.ceil(val / 25) * 25
189
+ if key == "viewcone_radius":
190
+ return np.ceil(val / 0.25) * 0.25
191
+ return val
192
+
193
+
194
+ def _create_table_columns(cols, columns, units):
195
+ """Create table columns with appropriate data types."""
196
+ table_cols = []
197
+ for k in cols:
198
+ col_data = columns[k]
199
+ if any(isinstance(v, list | tuple) for v in col_data):
200
+ col = Column(data=col_data, name=k, unit=units.get(k), dtype=object)
201
+ else:
202
+ col = Column(data=col_data, name=k, unit=units.get(k))
203
+ table_cols.append(col)
204
+ return table_cols
205
+
206
+
207
+ def _read_array_layouts_from_db(layouts, site, model_version, db_config):
208
+ """
209
+ Read array layouts from the database.
210
+
211
+ Parameters
212
+ ----------
213
+ layouts : list[str]
214
+ List of layout names to read. If "all", read all available layouts.
215
+ site : str
216
+ Site name for the array layouts.
217
+ model_version : str
218
+ Model version for the array layouts.
219
+ db_config : dict
220
+ Database configuration dictionary.
221
+
222
+ Returns
223
+ -------
224
+ dict
225
+ Dictionary mapping layout names to telescope IDs.
226
+ """
227
+ site_model = SiteModel(site=site, model_version=model_version, mongo_db_config=db_config)
228
+ layout_names = site_model.get_list_of_array_layouts() if layouts == ["all"] else layouts
229
+ layout_dict = {}
230
+ for layout_name in layout_names:
231
+ layout_dict[layout_name] = site_model.get_array_elements_for_layout(layout_name)
232
+ return layout_dict
@@ -27,7 +27,7 @@ class ProductionStatisticsDerivator:
27
27
 
28
28
  Parameters
29
29
  ----------
30
- evaluator : StatisticalErrorEvaluator
30
+ evaluator : StatisticalUncertaintyEvaluator
31
31
  The evaluator responsible for calculating metrics and handling event data.
32
32
  metrics : dict
33
33
  Dictionary containing metrics, including target error for effective area.
@@ -35,6 +35,50 @@ class ProductionStatisticsDerivator:
35
35
  self.evaluator = evaluator
36
36
  self.metrics = metrics
37
37
 
38
+ def _compute_scaling_factor(self) -> np.ndarray:
39
+ """
40
+ Compute bin-wise scaling factors based on error metrics.
41
+
42
+ Takes into account the energy range specified in the metrics and
43
+ calculates a separate scaling factor for each energy bin.
44
+
45
+ Returns
46
+ -------
47
+ np.ndarray
48
+ Array of scaling factors for each energy bin.
49
+ """
50
+ uncertainty_effective_area = self.evaluator.metric_results.get("uncertainty_effective_area")
51
+ relative_uncertainties = uncertainty_effective_area.get("relative_uncertainties")
52
+ energy_range = (
53
+ self.metrics.get("uncertainty_effective_area").get("energy_range").get("value")
54
+ )
55
+ energy_unit = u.Unit(
56
+ self.metrics.get("uncertainty_effective_area").get("energy_range").get("unit")
57
+ )
58
+
59
+ energy_range_converted = np.array(energy_range) * energy_unit
60
+
61
+ bin_edges = self.evaluator.energy_bin_edges
62
+ bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
63
+
64
+ # Mask for bins within the metric specified energy range
65
+ mask = (bin_centers >= energy_range_converted[0]) & (
66
+ bin_centers <= energy_range_converted[1]
67
+ )
68
+
69
+ scaling_factors = np.zeros_like(relative_uncertainties)
70
+
71
+ target_uncertainty = (
72
+ self.metrics.get("uncertainty_effective_area").get("target_uncertainty").get("value")
73
+ )
74
+
75
+ # Calculate scaling factor only for bins within the energy range
76
+ # For bins with zero events/uncertainty, use a scaling factor of 0
77
+ valid_bins = mask & (relative_uncertainties > 0)
78
+ scaling_factors[valid_bins] = (relative_uncertainties[valid_bins] / target_uncertainty) ** 2
79
+
80
+ return scaling_factors
81
+
38
82
  def derive_statistics(self, return_sum: bool = True) -> u.Quantity:
39
83
  """
40
84
  Derive the production statistics based on statistical error metrics.
@@ -54,31 +98,15 @@ class ProductionStatisticsDerivator:
54
98
  If 'return_sum' is False, returns an array of production statistics along the energy
55
99
  axis as a u.Quantity.
56
100
  """
57
- scaling_factor = self._compute_scaling_factor()
58
-
101
+ scaling_factors = self._compute_scaling_factor()
59
102
  base_events = self._number_of_simulated_events()
103
+ # currently we use the maximum of the scaling factors to scale the events. This is a soft
104
+ # requirement if we want to keep the power law shape of the production statistics.
105
+ scaled_events = base_events * np.max(scaling_factors)
60
106
 
61
107
  if return_sum:
62
- return np.sum(base_events * scaling_factor)
63
- return base_events * scaling_factor
64
-
65
- def _compute_scaling_factor(self) -> float:
66
- """
67
- Compute the scaling factor based on the error metrics.
68
-
69
- Returns
70
- -------
71
- float
72
- The scaling factor.
73
- """
74
- metric_results = self.evaluator.calculate_metrics()
75
- uncertainty_effective_area = metric_results.get("uncertainty_effective_area")
76
- current_max_error = uncertainty_effective_area.get("max_error")
77
- target_max_error = self.metrics.get("uncertainty_effective_area").get("target_error")[
78
- "value"
79
- ]
80
-
81
- return (current_max_error / target_max_error) ** 2
108
+ return np.sum(scaled_events)
109
+ return scaled_events
82
110
 
83
111
  def _number_of_simulated_events(self) -> u.Quantity:
84
112
  """
@@ -112,13 +140,16 @@ class ProductionStatisticsDerivator:
112
140
  bin_edges = self.evaluator.create_bin_edges()
113
141
  bin_idx = np.digitize(energy, bin_edges) - 1
114
142
 
115
- scaling_factor = self._compute_scaling_factor()
143
+ # Get scaling factors for all bins
144
+ scaling_factors = self._compute_scaling_factor()
116
145
 
117
146
  simulated_event_histogram = self.evaluator.data.get("simulated_event_histogram", [])
118
147
 
119
148
  if bin_idx < 0 or bin_idx >= len(simulated_event_histogram):
120
- raise ValueError(f"Energy {energy} is outside therange of the simulated events data.")
149
+ raise ValueError(f"Energy {energy} is outside the range of the simulated events data.")
121
150
 
122
151
  base_events = self._number_of_simulated_events()
123
152
  base_event_at_energy = base_events[bin_idx]
124
- return base_event_at_energy * scaling_factor
153
+ scaling_factor_at_energy = scaling_factors[bin_idx]
154
+
155
+ return base_event_at_energy * scaling_factor_at_energy
@@ -1,11 +1,11 @@
1
1
  """
2
- Module to run the StatisticalErrorEvaluator and interpolate results.
2
+ Derives the required statistics for a requested set of production parameters through interpolation.
3
3
 
4
4
  This module provides the `ProductionStatisticsHandler` class, which manages the workflow for
5
5
  derivation of required number of events for a simulation production using pre-defined metrics.
6
6
 
7
7
  The module includes functionality to:
8
- - Initialize evaluators for statistical error calculations based on input parameters.
8
+ - Initialize evaluators for statistical uncertainty calculations based on input parameters.
9
9
  - Perform interpolation using the initialized evaluators to estimate production statistics at a
10
10
  query point.
11
11
  - Write the results of the interpolation to an output file.
@@ -17,10 +17,9 @@ import logging
17
17
  from pathlib import Path
18
18
 
19
19
  import astropy.units as u
20
- import numpy as np
21
20
 
22
- from simtools.production_configuration.calculate_statistical_errors_grid_point import (
23
- StatisticalErrorEvaluator,
21
+ from simtools.production_configuration.calculate_statistical_uncertainties_grid_point import (
22
+ StatisticalUncertaintyEvaluator,
24
23
  )
25
24
  from simtools.production_configuration.interpolation_handler import InterpolationHandler
26
25
  from simtools.utils.general import collect_data_from_file
@@ -35,7 +34,7 @@ class ProductionStatisticsHandler:
35
34
  production at a specified query point.
36
35
  """
37
36
 
38
- def __init__(self, args_dict):
37
+ def __init__(self, args_dict, output_path):
39
38
  """
40
39
  Initialize the manager with the provided arguments.
41
40
 
@@ -43,35 +42,56 @@ class ProductionStatisticsHandler:
43
42
  ----------
44
43
  args_dict : dict
45
44
  Dictionary of command-line arguments.
45
+ output_path : Path
46
+ Path to the directory where the event statistics output file will be saved.
46
47
  """
47
48
  self.args = args_dict
48
49
  self.logger = logging.getLogger(__name__)
49
- self.output_path = Path(self.args.get("output_path", "."))
50
- self.output_filepath = self.output_path.joinpath(f"{self.args['output_file']}")
50
+ self.output_path = output_path
51
51
  self.metrics = collect_data_from_file(self.args["metrics_file"])
52
52
  self.evaluator_instances = []
53
+ self.interpolation_handler = None
54
+ self.grid_points_production = self._load_grid_points_production()
55
+
56
+ def _load_grid_points_production(self):
57
+ """Load grid points from the JSON file."""
58
+ grid_points_production_file = self.args["grid_points_production_file"]
59
+ return collect_data_from_file(grid_points_production_file)
53
60
 
54
61
  def initialize_evaluators(self):
55
- """Initialize StatisticalErrorEvaluator instances for the given zeniths and offsets."""
56
- if not (self.args["base_path"] and self.args["zeniths"] and self.args["camera_offsets"]):
62
+ """Initialize StatisticalUncertaintyEvaluator instances for the given grid point."""
63
+ if not (
64
+ self.args["base_path"]
65
+ and self.args["zeniths"]
66
+ and self.args["azimuths"]
67
+ and self.args["nsb"]
68
+ and self.args["offsets"]
69
+ ):
57
70
  self.logger.warning("No files read")
58
71
  self.logger.warning(f"Base Path: {self.args['base_path']}")
59
72
  self.logger.warning(f"Zeniths: {self.args['zeniths']}")
60
- self.logger.warning(f"Camera offsets: {self.args['camera_offsets']}")
73
+ self.logger.warning(f"Camera offsets: {self.args['offsets']}")
61
74
  return
62
75
 
63
- for zenith, offset in itertools.product(self.args["zeniths"], self.args["camera_offsets"]):
64
- file_name = self.args["file_name_template"].format(zenith=int(zenith))
76
+ for zenith, azimuth, nsb, offset in itertools.product(
77
+ self.args["zeniths"], self.args["azimuths"], self.args["nsb"], self.args["offsets"]
78
+ ):
79
+ file_name = self.args["file_name_template"].format(
80
+ zenith=int(zenith),
81
+ azimuth=azimuth,
82
+ nsb=nsb,
83
+ offset=offset,
84
+ )
65
85
  file_path = Path(self.args["base_path"]).joinpath(file_name)
66
86
 
67
87
  if not file_path.exists():
68
88
  self.logger.warning(f"File not found: {file_path}. Skipping.")
69
89
  continue
70
90
 
71
- evaluator = StatisticalErrorEvaluator(
91
+ evaluator = StatisticalUncertaintyEvaluator(
72
92
  file_path,
73
93
  metrics=self.metrics,
74
- grid_point=(None, None, zenith, None, offset * u.deg),
94
+ grid_point=(None, azimuth, zenith, nsb, offset * u.deg),
75
95
  )
76
96
  evaluator.calculate_metrics()
77
97
  self.evaluator_instances.append(evaluator)
@@ -82,38 +102,51 @@ class ProductionStatisticsHandler:
82
102
  self.logger.error("No evaluators initialized. Cannot perform interpolation.")
83
103
  return None
84
104
 
85
- interpolation_handler = InterpolationHandler(self.evaluator_instances, metrics=self.metrics)
86
- query_point = self.args.get("query_point")
87
- if not query_point or len(query_point) != 5:
88
- raise ValueError(
89
- "Invalid query point format. "
90
- f"Expected 5 values, got {len(query_point) if query_point else 'None'}."
105
+ self.interpolation_handler = InterpolationHandler(
106
+ self.evaluator_instances,
107
+ metrics=self.metrics,
108
+ grid_points_production=self.grid_points_production,
109
+ )
110
+ qrid_points_with_statistics = []
111
+
112
+ interpolated_production_statistics = self.interpolation_handler.interpolate()
113
+ for grid_point, statistics in zip(
114
+ self.grid_points_production, interpolated_production_statistics
115
+ ):
116
+ qrid_points_with_statistics.append(
117
+ {
118
+ "grid_point": grid_point,
119
+ "interpolated_production_statistics": float(statistics),
120
+ }
91
121
  )
92
- query_points = np.array([self.args["query_point"]])
93
- return interpolation_handler.interpolate(query_points)
122
+ return qrid_points_with_statistics
94
123
 
95
124
  def write_output(self, production_statistics):
96
125
  """Write the derived event statistics to a file."""
97
- output_data = {
98
- "query_point": self.args["query_point"],
99
- "production_statistics": production_statistics.tolist(),
100
- }
101
- self.output_filepath.parent.mkdir(parents=True, exist_ok=True)
102
- with open(self.output_filepath, "w", encoding="utf-8") as f:
126
+ output_data = (production_statistics,)
127
+ output_filename = self.args["output_file"]
128
+ self.output_path.mkdir(parents=True, exist_ok=True)
129
+ output_file_path = self.output_path.joinpath(output_filename)
130
+ with open(output_file_path, "w", encoding="utf-8") as f:
103
131
  json.dump(output_data, f, indent=4)
104
- self.logger.info(f"Output saved to {self.output_filepath}")
105
- self.logger.info(
106
- f"production statistics for grid point "
107
- f"{self.args['query_point']}: {production_statistics}"
108
- )
132
+ self.logger.info(f"Output saved to {self.output_path}")
133
+
134
+ def plot_production_statistics_comparison(self):
135
+ """Plot the derived event statistics."""
136
+ ax = self.interpolation_handler.plot_comparison()
137
+ plot_path = self.output_path.joinpath("production_statistics_comparison.png")
138
+ plot_path.parent.mkdir(parents=True, exist_ok=True)
139
+ ax.figure.savefig(plot_path)
140
+ self.logger.info(f"Plot saved to {plot_path}")
109
141
 
110
142
  def run(self):
111
143
  """Run the scaling and interpolation workflow."""
112
- self.logger.info(f"Zeniths: {self.args['zeniths']}")
113
- self.logger.info(f"Camera offsets: {self.args['camera_offsets']}")
114
- self.logger.info(f"Query Point: {self.args['query_point']}")
144
+ self.logger.info(f"Grid Points File: {self.args['grid_points_production_file']}")
115
145
  self.logger.info(f"Metrics File: {self.args['metrics_file']}")
116
146
 
117
147
  self.initialize_evaluators()
118
148
  production_statistics = self.perform_interpolation()
149
+ if self.args.get("plot_production_statistics"):
150
+ self.plot_production_statistics_comparison()
151
+
119
152
  self.write_output(production_statistics)