pyelq 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyelq/source_map.py ADDED
@@ -0,0 +1,115 @@
+ # SPDX-FileCopyrightText: 2024 Shell Global Solutions International B.V. All Rights Reserved.
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ # -*- coding: utf-8 -*-
+ """SourceMap module.
+
+ The class for the source maps used in pyELQ
+
+ """
+ from dataclasses import dataclass, field
+ from typing import Union
+
+ import numpy as np
+
+ from pyelq.coordinate_system import Coordinate, make_latin_hypercube
+ from pyelq.sensor.sensor import Sensor
+
+
+ @dataclass
+ class SourceMap:
+     """Defines SourceMap class.
+
+     Attributes:
+         location (Coordinate, optional): Coordinate object specifying the potential source locations
+         prior_value (np.ndarray, optional): Array with prior values for each source
+         inclusion_idx (np.ndarray, optional): Array of lists containing indices of the observations of a
+             corresponding sensor_object which are within the inclusion_radius of that particular source
+         inclusion_n_obs (np.ndarray, optional): Array containing the number of observations of a sensor_object
+             within the radius for each source
+
+     """
+
+     location: Coordinate = field(init=False, default=None)
+     prior_value: np.ndarray = None
+     inclusion_idx: np.ndarray = field(init=False, default=None)
+     inclusion_n_obs: np.ndarray = field(init=False, default=None)
+
+     @property
+     def nof_sources(self) -> int:
+         """Number of sources."""
+         if self.location is None:
+             return 0
+         return self.location.nof_observations
+
+     def calculate_inclusion_idx(self, sensor_object: Sensor, inclusion_radius: Union[float, np.ndarray]) -> None:
+         """Find the indices of the observations which are within the specified radius of each source location.
+
+         This method takes the sensor object and, for each source in the source_map object, calculates which
+         observations are within the specified radius. When the sensor_object location and the source map location
+         are not of the same type, both are simply converted to ECEF and the inclusion indices are calculated
+         accordingly. When a location attribute is in LLA it is converted to ECEF so that the inclusion radius
+         (in meters) makes sense.
+
+         The result is an array of lists containing the indices of the observations in sensor_object which are
+         within the specified radius of each source; it is stored in the inclusion_idx attribute. The number of
+         observations within the radius per source is stored in the inclusion_n_obs attribute.
+
+         Args:
+             sensor_object (Sensor): Sensor object containing location information on the observations under
+                 consideration
+             inclusion_radius (Union[float, np.ndarray]): Inclusion radius in [m] around each source for which
+                 observations are taken into account
+
+         """
+         sensor_kd_tree = sensor_object.location.to_ecef().create_tree()
+         source_points = self.location.to_ecef().to_array()
+
+         inclusion_idx = sensor_kd_tree.query_ball_point(source_points, inclusion_radius)
+         idx_array = np.array(inclusion_idx, dtype=object)
+         self.inclusion_idx = idx_array
+         self.inclusion_n_obs = np.array([len(value) for value in self.inclusion_idx])
+
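For reference, a generic illustration of the radius query performed above (plain SciPy, not specific to pyELQ; the observation points, source point and radius are made up):

import numpy as np
from scipy.spatial import KDTree

sensor_points = np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [0.0, 3.0, 0.0]])  # observation locations
source_points = np.array([[0.0, 0.0, 0.0]])  # one candidate source location
tree = KDTree(sensor_points)
inclusion_idx = tree.query_ball_point(source_points, r=5.0)
# One list of observation indices per source; here observations 0 and 2 lie within 5 m of the source.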
+     def generate_sources(
+         self,
+         coordinate_object: Coordinate,
+         sourcemap_limits: np.ndarray,
+         sourcemap_type: str = "central",
+         nof_sources: int = 5,
+         grid_shape: Union[tuple, np.ndarray] = (5, 5, 1),
+     ) -> None:
+         """Generates source locations based on specified inputs.
+
+         The result gets stored in the location attribute.
+
+         In the grid_sphere case we scale the latitude and longitude from -90/90 and -180/180 to 0/1 for use in
+         temp_lat_rad and temp_lon_rad.
+
+         Args:
+             coordinate_object (Coordinate): Empty coordinate object which specifies the coordinate class to
+                 populate location with
+             sourcemap_limits (np.ndarray): Limits of the source map on which to generate the sources, of size
+                 [dim x 2]. If dim == 2 we assume the third dimension will be zeros. The units of the limits are
+                 assumed to be defined in the desired coordinate system
+             sourcemap_type (str, optional): Type of source map to generate: central == 1 central source,
+                 hypercube == nof_sources sources through a Latin hypercube design, grid == grid of shape grid_shape
+                 filled with sources, grid_sphere == grid of shape grid_shape taking into account a spherical spacing
+             nof_sources (int, optional): Number of sources to generate (used in the 'hypercube' case)
+             grid_shape (tuple, optional): Number of sources to generate in each dimension; the total number of
+                 sources will be the product of the entries of this tuple (used in the 'grid' and 'grid_sphere' cases)
+
+         """
+         sourcemap_dimension = sourcemap_limits.shape[0]
+         if sourcemap_type == "central":
+             array = sourcemap_limits.mean(axis=1).reshape(1, sourcemap_dimension)
+         elif sourcemap_type == "hypercube":
+             array = make_latin_hypercube(bounds=sourcemap_limits, nof_samples=nof_sources)
+         elif sourcemap_type == "grid":
+             array = coordinate_object.make_grid(bounds=sourcemap_limits, grid_type="rectangular", shape=grid_shape)
+         elif sourcemap_type == "grid_sphere":
+             array = coordinate_object.make_grid(bounds=sourcemap_limits, grid_type="spherical", shape=grid_shape)
+         else:
+             raise NotImplementedError("Please provide a valid sourcemap type")
+         coordinate_object.from_array(array=array)
+         self.location = coordinate_object
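A minimal usage sketch (illustrative only, not part of the packaged code; the reference coordinates and site limits are assumptions):

import numpy as np

from pyelq.coordinate_system import ENU
from pyelq.source_map import SourceMap

source_map = SourceMap()
site_limits = np.array([[0.0, 30.0], [0.0, 30.0], [0.0, 3.0]])  # east/north/up limits in meters
enu_object = ENU(ref_latitude=51.0, ref_longitude=0.0, ref_altitude=0.0)
source_map.generate_sources(
    coordinate_object=enu_object,
    sourcemap_limits=site_limits,
    sourcemap_type="hypercube",
    nof_sources=4,
)
print(source_map.nof_sources)  # expected: 4
# With a populated Sensor object, source_map.calculate_inclusion_idx(sensor, inclusion_radius=5.0)
# would then record which observations fall within 5 m of each source.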
pyelq/support_functions/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # SPDX-FileCopyrightText: 2024 Shell Global Solutions International B.V. All Rights Reserved.
+ #
+ # SPDX-License-Identifier: Apache-2.0
+ """Support Functions Module."""
+ __all__ = ["post_processing", "spatio_temporal_interpolation"]
pyelq/support_functions/post_processing.py ADDED
@@ -0,0 +1,377 @@
+ # SPDX-FileCopyrightText: 2024 Shell Global Solutions International B.V. All Rights Reserved.
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ # -*- coding: utf-8 -*-
+ """Post-processing module.
+
+ Module containing some functions used in post-processing of the results.
+
+ """
+ import warnings
+ from typing import TYPE_CHECKING, Tuple, Union
+
+ import numpy as np
+ import pandas as pd
+ from scipy.ndimage import label
+ from shapely import geometry
+
+ from pyelq.coordinate_system import ENU
+
+ if TYPE_CHECKING:
+     from pyelq.model import ELQModel
+
+
+ def is_regularly_spaced(array: np.ndarray, tolerance: float = 0.01, return_delta: bool = True):
+     """Determines whether an input array is regularly spaced, within some (absolute) tolerance.
+
+     Gets the large differences (defined by tolerance) in the array, and sees whether all of them are within 5% of
+     one another.
+
+     Args:
+         array (np.ndarray): Input array to be analysed.
+         tolerance (float, optional): Absolute value above which the difference between values is considered
+             significant. Defaults to 0.01.
+         return_delta (bool, optional): Whether to return the value of the regular grid spacing. Defaults to True.
+
+     Returns:
+         (bool): Whether the grid is regularly spaced.
+         (float): The value of the regular grid spacing.
+
+     """
+     unique_vals = np.unique(array)
+     diff_unique_vals = np.diff(unique_vals)
+     diff_big = diff_unique_vals[diff_unique_vals > tolerance]
+
+     boolean = np.all([np.isclose(diff_big[i], diff_big[i + 1], rtol=0.05) for i in range(len(diff_big) - 1)])
+
+     if return_delta:
+         return boolean, np.mean(diff_big)
+
+     return boolean, None
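A small illustration of this helper (assuming the module is importable as pyelq.support_functions.post_processing):

import numpy as np

from pyelq.support_functions.post_processing import is_regularly_spaced

values = np.array([0.0, 0.5, 0.5, 1.0, 1.5, 2.0])  # 0.5 m grid with one duplicated value
is_regular, delta = is_regularly_spaced(values, tolerance=0.01)
# is_regular is True and delta is 0.5: the duplicate produces a difference below the tolerance,
# which is ignored, and the remaining differences agree to within 5% of one another.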
+
+
+ def calculate_rectangular_statistics(
+     model_object: "ELQModel",
+     bin_size_x: float = 1,
+     bin_size_y: float = 1,
+     burn_in: int = 0,
+     normalized_count_limit: float = 0.005,
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, list, pd.DataFrame]:
+     """Function which aggregates the pyELQ results into rectangular bins and outputs the related summary statistics.
+
+     The function creates a pixel grid (binning) in East-North coordinates based on the bin_size_x and bin_size_y
+     parameters. For each bin both a count as well as a weighted sum of the emission rate estimates is calculated.
+     The count is normalized by the number of iterations used in the MCMC and a boolean array is created which
+     indicates if the count is above a certain threshold. Connected pixels where the count is above this threshold
+     are considered to be a single blob/source and emission estimates per blob are summed over all pixels in the
+     blob. The function then calculates the summary statistics for each blob of estimates, i.e. for each set of
+     connected pixels. The summary statistics include the median and IQR of the emission rate estimates, the mean
+     location of the blob and the likelihood of the blob.
+
+     Args:
+         model_object (ELQModel): ELQModel object containing the results of the MCMC run.
+         bin_size_x (float, optional): Size of the bins in the x-direction. Defaults to 1.
+         bin_size_y (float, optional): Size of the bins in the y-direction. Defaults to 1.
+         burn_in (int, optional): Number of burn-in iterations used in the MCMC. Defaults to 0.
+         normalized_count_limit (float, optional): Threshold for the normalized count to be considered a blob. Defaults to 0.005.
+
+     Returns:
+         result_weighted (np.ndarray): Weighted sum of the emission rate estimates in each bin.
+         overall_count (np.ndarray): Count of the number of estimates in each bin.
+         normalized_count (np.ndarray): Normalized count of the number of estimates in each bin.
+         count_boolean (np.ndarray): Boolean array which indicates if the likelihood of a pixel is over the threshold.
+         edges_result (list): Edges of the pixels in the x and y direction.
+         summary_result (pd.DataFrame): Summary statistics for each blob of estimates.
+
+     """
+     nof_iterations = model_object.n_iter
+     ref_latitude = model_object.components["source"].dispersion_model.source_map.location.ref_latitude
+     ref_longitude = model_object.components["source"].dispersion_model.source_map.location.ref_longitude
+     ref_altitude = model_object.components["source"].dispersion_model.source_map.location.ref_altitude
+
+     if model_object.components["source"].reversible_jump:
+         all_source_locations = model_object.mcmc.store["z_src"]
+     else:
+         source_locations = (
+             model_object.components["source"]
+             .dispersion_model.source_map.location.to_enu(
+                 ref_longitude=ref_longitude, ref_latitude=ref_latitude, ref_altitude=ref_altitude
+             )
+             .to_array()
+         )
+         all_source_locations = np.repeat(source_locations.T[:, :, np.newaxis], model_object.mcmc.n_iter, axis=2)
+
+     if np.all(np.isnan(all_source_locations[:2, :, :])):
+         warnings.warn("No sources found")
+         result_weighted = np.array([[[np.nan]]])
+         overall_count = np.array([[0]])
+         normalized_count = np.array([[0]])
+         count_boolean = np.array([[False]])
+         edges_result = [np.array([np.nan])] * 2
+         summary_result = return_empty_summary_dataframe()
+
+         return result_weighted, overall_count, normalized_count, count_boolean, edges_result[:2], summary_result
+
+     min_x = np.nanmin(all_source_locations[0, :, :])
+     max_x = np.nanmax(all_source_locations[0, :, :])
+     min_y = np.nanmin(all_source_locations[1, :, :])
+     max_y = np.nanmax(all_source_locations[1, :, :])
+
+     bin_min_x = np.floor(min_x - 0.1)
+     bin_max_x = np.ceil(max_x + 0.1)
+     bin_min_y = np.floor(min_y - 0.1)
+     bin_max_y = np.ceil(max_y + 0.1)
+     bin_min_iteration = burn_in + 0.5
+     bin_max_iteration = nof_iterations + 0.5
+
+     max_nof_sources = all_source_locations.shape[1]
+
+     x_edges = np.arange(start=bin_min_x, stop=bin_max_x + bin_size_x, step=bin_size_x)
+     y_edges = np.arange(start=bin_min_y, stop=bin_max_y + bin_size_y, step=bin_size_y)
+     iteration_edges = np.arange(start=bin_min_iteration, stop=bin_max_iteration + 1, step=1)
+
+     result_x_vals = all_source_locations[0, :, :].flatten()
+     result_y_vals = all_source_locations[1, :, :].flatten()
+     result_z_vals = all_source_locations[2, :, :].flatten()
+
+     result_iteration_vals = np.array(range(nof_iterations)).reshape(1, -1) + 1
+     result_iteration_vals = np.tile(result_iteration_vals, (max_nof_sources, 1)).flatten()
+     results_estimates = model_object.mcmc.store["s"].flatten()
+
+     result_weighted, _ = np.histogramdd(
+         sample=np.array([result_x_vals, result_y_vals, result_iteration_vals]).T,
+         bins=[x_edges, y_edges, iteration_edges],
+         weights=results_estimates,
+         density=False,
+     )
+
+     count_result, edges_result = np.histogramdd(
+         sample=np.array([result_x_vals, result_y_vals, result_iteration_vals]).T,
+         bins=[x_edges, y_edges, iteration_edges],
+         density=False,
+     )
+
+     overall_count = np.array(np.sum(count_result, axis=2))
+     normalized_count = overall_count / (nof_iterations - burn_in)
+     count_boolean = normalized_count >= normalized_count_limit
+
+     summary_result = create_aggregation(
+         result_iteration_vals=result_iteration_vals,
+         burn_in=burn_in,
+         result_x_vals=result_x_vals,
+         result_y_vals=result_y_vals,
+         result_z_vals=result_z_vals,
+         results_estimates=results_estimates,
+         count_boolean=count_boolean,
+         x_edges=x_edges,
+         y_edges=y_edges,
+         nof_iterations=nof_iterations,
+         ref_latitude=ref_latitude,
+         ref_longitude=ref_longitude,
+         ref_altitude=ref_altitude,
+     )
+
+     return result_weighted, overall_count, normalized_count, count_boolean, edges_result[:2], summary_result
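A sketch of how this function is typically called (it assumes `model` is an ELQModel on which the MCMC has already been run; the parameter values are arbitrary):

from pyelq.support_functions.post_processing import calculate_rectangular_statistics

(result_weighted, overall_count, normalized_count,
 count_boolean, pixel_edges, summary) = calculate_rectangular_statistics(
    model_object=model,
    bin_size_x=1.0,
    bin_size_y=1.0,
    burn_in=5000,
    normalized_count_limit=0.005,
)
# One row per detected blob with its location, median emission rate estimate and blob likelihood.
print(summary[["latitude", "longitude", "median_estimate", "blob_likelihood"]])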
+
+
+ def create_lla_polygons_from_xy_points(
+     points_array: list[np.ndarray],
+     ref_latitude: float,
+     ref_longitude: float,
+     ref_altitude: float,
+     boolean_mask: Union[np.ndarray, None] = None,
+ ) -> list[geometry.Polygon]:
+     """Function to create polygons in LLA coordinates from a grid of points in ENU coordinates.
+
+     This function takes a grid of East-North points; these points are used as center points for a pixel grid. The
+     pixel grid is then converted to LLA coordinates and these center points are used to create a polygon in LLA
+     coordinates. A polygon is only created if the boolean mask for that pixel is True. In case only one unique
+     East-North point is available, a predefined grid size of 1e-6 degrees (equal to 0.0036 arcseconds) is assumed.
+
+     Args:
+         points_array (list[np.ndarray]): List of arrays of grid of points in ENU coordinates.
+         ref_latitude (float): Reference latitude in degrees of ENU coordinate system.
+         ref_longitude (float): Reference longitude in degrees of ENU coordinate system.
+         ref_altitude (float): Reference altitude in meters of ENU coordinate system.
+         boolean_mask (np.ndarray, optional): Boolean mask to indicate which pixels to create polygons for.
+             Defaults to None, which means all pixels are used.
+
+     Returns:
+         list[geometry.Polygon]: List of polygons in LLA coordinates.
+     """
+     if boolean_mask is None:
+         boolean_mask = np.ones_like(points_array, dtype=bool)
+
+     enu_x = points_array[0]
+     enu_x = enu_x[:-1] + np.diff(enu_x) / 2
+     enu_y = points_array[1]
+     enu_y = enu_y[:-1] + np.diff(enu_y) / 2
+
+     enu_x, enu_y = np.meshgrid(enu_x, enu_y, indexing="ij")
+
+     enu_object_full_grid = ENU(ref_latitude=ref_latitude, ref_longitude=ref_longitude, ref_altitude=ref_altitude)
+     enu_object_full_grid.east = enu_x.flatten()
+     enu_object_full_grid.north = enu_y.flatten()
+     enu_object_full_grid.up = np.zeros_like(enu_object_full_grid.north)
+     lla_object_full_grid = enu_object_full_grid.to_lla()
+
+     _, gridsize_lat = is_regularly_spaced(lla_object_full_grid.latitude, tolerance=1e-6)
+     _, gridsize_lon = is_regularly_spaced(lla_object_full_grid.longitude, tolerance=1e-6)
+
+     if np.isnan(gridsize_lat):
+         gridsize_lat = 1e-6
+     if np.isnan(gridsize_lon):
+         gridsize_lon = 1e-6
+
+     polygons = [
+         geometry.box(
+             lla_object_full_grid.longitude[idx] - gridsize_lon / 2,
+             lla_object_full_grid.latitude[idx] - gridsize_lat / 2,
+             lla_object_full_grid.longitude[idx] + gridsize_lon / 2,
+             lla_object_full_grid.latitude[idx] + gridsize_lat / 2,
+         )
+         for idx in np.argwhere(boolean_mask.flatten()).flatten()
+     ]
+
+     return polygons
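Continuing the sketch above, the pixel edges and boolean mask returned by calculate_rectangular_statistics can be turned into plottable polygons (the reference coordinates are again illustrative):

from pyelq.support_functions.post_processing import create_lla_polygons_from_xy_points

polygons = create_lla_polygons_from_xy_points(
    points_array=pixel_edges,  # [x_edges, y_edges] of the East-North pixel grid
    ref_latitude=51.0,
    ref_longitude=0.0,
    ref_altitude=0.0,
    boolean_mask=count_boolean,  # only keep pixels above the likelihood threshold
)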
+
+
+ def create_aggregation(
+     result_x_vals: np.ndarray,
+     result_y_vals: np.ndarray,
+     result_z_vals: np.ndarray,
+     results_estimates: np.ndarray,
+     result_iteration_vals: np.ndarray,
+     count_boolean: np.ndarray,
+     x_edges: np.ndarray,
+     y_edges: np.ndarray,
+     nof_iterations: int,
+     burn_in: int,
+     ref_latitude: float,
+     ref_longitude: float,
+     ref_altitude: float,
+ ) -> pd.DataFrame:
+     """Function to create the aggregated information for the blobs of estimates.
+
+     We identify all blobs of estimates which appear close together on the map by looking at connected pixels in the
+     count_boolean array. Next we find the summary statistics for all estimates in that blob, such as the overall
+     median and IQR estimate, the mean location and the likelihood of that blob.
+
+     When multiple sources are present in the same blob at the same iteration we first sum those emission rate
+     estimates before taking the median.
+
+     If no blobs are found a dataframe with nan values is returned to avoid breaking plotting code which calls this
+     function.
+
+     Args:
+         result_x_vals (np.ndarray): X-coordinate of estimates, flattened array of (n_sources_max * nof_iterations,).
+         result_y_vals (np.ndarray): Y-coordinate of estimates, flattened array of (n_sources_max * nof_iterations,).
+         result_z_vals (np.ndarray): Z-coordinate of estimates, flattened array of (n_sources_max * nof_iterations,).
+         results_estimates (np.ndarray): Emission rate estimates, flattened array of
+             (n_sources_max * nof_iterations,).
+         result_iteration_vals (np.ndarray): Iteration number corresponding to each estimated value, flattened array
+             of (n_sources_max * nof_iterations,).
+         count_boolean (np.ndarray): Boolean array which indicates if the likelihood of a pixel is over the threshold.
+         x_edges (np.ndarray): Pixel edges x-coordinates.
+         y_edges (np.ndarray): Pixel edges y-coordinates.
+         nof_iterations (int): Number of iterations used in MCMC.
+         burn_in (int): Burn-in used in MCMC.
+         ref_latitude (float): Reference latitude in degrees of ENU coordinate system.
+         ref_longitude (float): Reference longitude in degrees of ENU coordinate system.
+         ref_altitude (float): Reference altitude in meters of ENU coordinate system.
+
+     Returns:
+         summary_result (pd.DataFrame): Summary statistics for each blob of estimates.
+
+     """
+     labeled_array, num_features = label(input=count_boolean, structure=np.ones((3, 3)))
+
+     if num_features == 0:
+         summary_result = return_empty_summary_dataframe()
+         return summary_result
+
+     burn_in_bool = result_iteration_vals > burn_in
+     nan_x_vals = np.isnan(result_x_vals)
+     nan_y_vals = np.isnan(result_y_vals)
+     nan_z_vals = np.isnan(result_z_vals)
+     no_nan_idx = np.logical_not(np.logical_or(np.logical_or(nan_x_vals, nan_y_vals), nan_z_vals))
+     no_nan_and_burn_in_bool = np.logical_and(no_nan_idx, burn_in_bool)
+     result_x_vals_no_nan = result_x_vals[no_nan_and_burn_in_bool]
+     result_y_vals_no_nan = result_y_vals[no_nan_and_burn_in_bool]
+     result_z_vals_no_nan = result_z_vals[no_nan_and_burn_in_bool]
+     results_estimates_no_nan = results_estimates[no_nan_and_burn_in_bool]
+     result_iteration_vals_no_nan = result_iteration_vals[no_nan_and_burn_in_bool]
+
+     x_idx = np.digitize(result_x_vals_no_nan, x_edges, right=False) - 1
+     y_idx = np.digitize(result_y_vals_no_nan, y_edges, right=False) - 1
+     bin_numbers = np.ravel_multi_index((x_idx, y_idx), labeled_array.shape)
+
+     bin_numbers_per_label = [
+         np.ravel_multi_index(np.nonzero(labeled_array == value), labeled_array.shape)
+         for value in np.array(range(num_features)) + 1
+     ]
+
+     summary_result = pd.DataFrame()
+     summary_result.index.name = "source_ID"
+
+     for label_idx, curr_bins in enumerate(bin_numbers_per_label):
+         boolean_for_result = np.isin(bin_numbers, curr_bins)
+         mean_x = np.mean(result_x_vals_no_nan[boolean_for_result])
+         mean_y = np.mean(result_y_vals_no_nan[boolean_for_result])
+         mean_z = np.mean(result_z_vals_no_nan[boolean_for_result])
+
+         unique_iteration_vals, indices, counts = np.unique(
+             result_iteration_vals_no_nan[boolean_for_result], return_inverse=True, return_counts=True
+         )
+         nof_iterations_present = unique_iteration_vals.size
+         blob_likelihood = nof_iterations_present / (nof_iterations - burn_in)
+         single_idx = np.argwhere(counts == 1)
+         results_estimates_for_blob = results_estimates_no_nan[boolean_for_result]
+         temp_estimate_result = results_estimates_for_blob[np.isin(indices, single_idx.flatten())]
+         multiple_idx = np.argwhere(counts > 1)
+         for repeated_idx in multiple_idx:
+             temp_val = np.sum(results_estimates_for_blob[indices == repeated_idx])
+             temp_estimate_result = np.append(temp_estimate_result, temp_val)
+
+         median_estimate = np.median(temp_estimate_result)
+         iqr_estimate = np.nanquantile(a=temp_estimate_result, q=0.75) - np.nanquantile(a=temp_estimate_result, q=0.25)
+         lower_bound = np.nanquantile(a=temp_estimate_result, q=0.025)
+         upper_bound = np.nanquantile(a=temp_estimate_result, q=0.975)
+         enu_object = ENU(ref_latitude=ref_latitude, ref_longitude=ref_longitude, ref_altitude=ref_altitude)
+         enu_object.east = mean_x
+         enu_object.north = mean_y
+         enu_object.up = mean_z
+         lla_object = enu_object.to_lla()
+
+         summary_result.loc[label_idx, "latitude"] = lla_object.latitude
+         summary_result.loc[label_idx, "longitude"] = lla_object.longitude
+         summary_result.loc[label_idx, "altitude"] = lla_object.altitude
+         summary_result.loc[label_idx, "height"] = mean_z
+         summary_result.loc[label_idx, "median_estimate"] = median_estimate
+         summary_result.loc[label_idx, "quantile_025"] = lower_bound
+         summary_result.loc[label_idx, "quantile_975"] = upper_bound
+         summary_result.loc[label_idx, "iqr_estimate"] = iqr_estimate
+         summary_result.loc[label_idx, "absolute_count_iterations"] = nof_iterations_present
+         summary_result.loc[label_idx, "blob_likelihood"] = blob_likelihood
+
+     summary_result = summary_result.astype({"absolute_count_iterations": "int"})
+
+     return summary_result
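A minimal, self-contained illustration of the connected-pixel labelling this function relies on (plain SciPy, not pyELQ-specific):

import numpy as np
from scipy.ndimage import label

count_boolean = np.array(
    [
        [True, True, False, False],
        [False, False, False, True],
        [False, False, True, True],
    ]
)
labeled_array, num_features = label(input=count_boolean, structure=np.ones((3, 3)))
# labeled_array marks each connected blob with a value from 1 to num_features; here num_features == 2,
# with the two top-left pixels forming one blob and the three pixels in the bottom-right corner the other.
# The 3x3 structure element means pixels touching only diagonally also count as connected.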
+
+
+ def return_empty_summary_dataframe() -> pd.DataFrame:
+     """Helper function to create and return an empty summary dataframe with predefined columns."""
+     summary_result = pd.DataFrame()
+     summary_result.index.name = "source_ID"
+     summary_result.loc[0, "latitude"] = np.nan
+     summary_result.loc[0, "longitude"] = np.nan
+     summary_result.loc[0, "altitude"] = np.nan
+     summary_result.loc[0, "height"] = np.nan
+     summary_result.loc[0, "median_estimate"] = np.nan
+     summary_result.loc[0, "quantile_025"] = np.nan
+     summary_result.loc[0, "quantile_975"] = np.nan
+     summary_result.loc[0, "iqr_estimate"] = np.nan
+     summary_result.loc[0, "absolute_count_iterations"] = np.nan
+     summary_result.loc[0, "blob_likelihood"] = np.nan
+     return summary_result