pyelq 1.1.2__tar.gz → 1.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. {pyelq-1.1.2 → pyelq-1.1.4}/PKG-INFO +1 -1
  2. {pyelq-1.1.2 → pyelq-1.1.4}/pyproject.toml +1 -1
  3. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/component/source_model.py +176 -52
  4. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/model.py +132 -14
  5. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/plotting/plot.py +55 -21
  6. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/support_functions/post_processing.py +21 -32
  7. {pyelq-1.1.2 → pyelq-1.1.4}/LICENSE.md +0 -0
  8. {pyelq-1.1.2 → pyelq-1.1.4}/LICENSES/Apache-2.0.txt +0 -0
  9. {pyelq-1.1.2 → pyelq-1.1.4}/README.md +0 -0
  10. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/__init__.py +0 -0
  11. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/component/__init__.py +0 -0
  12. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/component/background.py +0 -0
  13. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/component/component.py +0 -0
  14. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/component/error_model.py +0 -0
  15. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/component/offset.py +0 -0
  16. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/coordinate_system.py +0 -0
  17. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/data_access/__init__.py +0 -0
  18. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/data_access/data_access.py +0 -0
  19. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/dispersion_model/__init__.py +0 -0
  20. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/dispersion_model/gaussian_plume.py +0 -0
  21. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/dlm.py +0 -0
  22. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/gas_species.py +0 -0
  23. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/meteorology.py +0 -0
  24. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/plotting/__init__.py +0 -0
  25. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/preprocessing.py +0 -0
  26. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/sensor/__init__.py +0 -0
  27. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/sensor/beam.py +0 -0
  28. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/sensor/satellite.py +0 -0
  29. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/sensor/sensor.py +0 -0
  30. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/source_map.py +0 -0
  31. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/support_functions/__init__.py +0 -0
  32. {pyelq-1.1.2 → pyelq-1.1.4}/src/pyelq/support_functions/spatio_temporal_interpolation.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: pyelq
3
- Version: 1.1.2
3
+ Version: 1.1.4
4
4
  Summary: Package for detection, localization and quantification code.
5
5
  License: Apache-2.0
6
6
  Keywords: gas dispersion,emission,detection,localization,quantification
@@ -8,7 +8,7 @@ build-backend = "poetry.core.masonry.api"
8
8
 
9
9
  [tool.poetry]
10
10
  name = "pyelq"
11
- version = "1.1.2"
11
+ version = "1.1.4"
12
12
  description = "Package for detection, localization and quantification code."
13
13
  authors = ["Bas van de Kerkhof", "Matthew Jones", "David Randell"]
14
14
  homepage = "https://sede-open.github.io/pyELQ/"
@@ -19,7 +19,7 @@ A SourceModel instance inherits from 3 super-classes:
19
19
  from abc import abstractmethod
20
20
  from copy import deepcopy
21
21
  from dataclasses import dataclass, field
22
- from typing import TYPE_CHECKING, Tuple, Union
22
+ from typing import TYPE_CHECKING, Optional, Tuple, Union
23
23
 
24
24
  import numpy as np
25
25
  from openmcmc import parameter
@@ -43,7 +43,50 @@ if TYPE_CHECKING:
43
43
 
44
44
 
45
45
  @dataclass
46
- class SourceGrouping:
46
+ class ParameterMapping:
47
+ """Class defining the mapping of variable/parameter labels needed for creating an analysis.
48
+
49
+ In instances where we want to include multiple source_model instances in an MCMC analysis, we can apply a suffix to
50
+ all of the parameter names in the mapping dictionary. This allows us to create separate variables for different
51
+ source map types, so that these can be associated with different sampler types in the MCMC analysis.
52
+
53
+ Attributes:
54
+ map (dict): dictionary containing mapping between variable types and MCMC parameters.
55
+
56
+ """
57
+
58
+ map: dict = field(
59
+ default_factory=lambda: {
60
+ "source": "s",
61
+ "coupling_matrix": "A",
62
+ "emission_rate_mean": "mu_s",
63
+ "emission_rate_precision": "lambda_s",
64
+ "allocation": "alloc_s",
65
+ "source_prob": "s_prob",
66
+ "precision_prior_shape": "a_lam_s",
67
+ "precision_prior_rate": "b_lam_s",
68
+ "source_location": "z_src",
69
+ "number_sources": "n_src",
70
+ "number_source_rate": "rho",
71
+ }
72
+ )
73
+
74
+ def append_string(self, string: str = None):
75
+ """Apply the supplied string as a suffix to all of the values in the mapping dictionary.
76
+
77
+ For example: {'source': 's'} would become {'source': 's_fixed'} when string = 'fixed' is passed as the argument.
78
+ If string is None, nothing is appended.
79
+
80
+ Args:
81
+ string (str): string to append to the variable names.
82
+
83
+ """
84
+ for key, value in self.map.items():
85
+ self.map[key] = value + "_" + string
86
+
87
+
88
+ @dataclass
89
+ class SourceGrouping(ParameterMapping):
47
90
  """Superclass for source grouping approach.
48
91
 
49
92
  Source grouping method determines the group allocation of each source in the model, e.g: slab and spike
@@ -52,13 +95,11 @@ class SourceGrouping:
52
95
  Attributes:
53
96
  nof_sources (int): number of sources in the model.
54
97
  emission_rate_mean (Union[float, np.ndarray]): prior mean parameter for the emission rate distribution.
55
- _source_key (str): label for the source parameter to be used in the distributions, samplers, MCMC state etc.
56
98
 
57
99
  """
58
100
 
59
101
  nof_sources: int = field(init=False)
60
102
  emission_rate_mean: Union[float, np.ndarray] = field(init=False)
61
- _source_key: str = field(init=False, default="s")
62
103
 
63
104
  @abstractmethod
64
105
  def make_allocation_model(self, model: list) -> list:
@@ -118,8 +159,14 @@ class NullGrouping(SourceGrouping):
118
159
  2) The case where the dimensionality of the source map is changing during the inversion, and a common prior
119
160
  mean and precision term are used for all sources.
120
161
 
162
+ Attributes:
163
+ number_on_sources (np.ndarray): number of sources switched on in the solution, per iteration. Extracted as a
164
+ property from the MCMC samples in self.from_mcmc_group().
165
+
121
166
  """
122
167
 
168
+ number_on_sources: np.ndarray = field(init=False)
169
+
123
170
  def make_allocation_model(self, model: list) -> list:
124
171
  """Initialise the source allocation part of the model.
125
172
 
@@ -163,20 +210,19 @@ class NullGrouping(SourceGrouping):
163
210
  dict: state updated with parameters related to the source grouping.
164
211
 
165
212
  """
166
- state["mu_s"] = np.array(self.emission_rate_mean, ndmin=1)
167
- state["alloc_s"] = np.zeros((self.nof_sources, 1), dtype="int")
213
+ state[self.map["emission_rate_mean"]] = np.array(self.emission_rate_mean, ndmin=1)
214
+ state[self.map["allocation"]] = np.zeros((self.nof_sources, 1), dtype="int")
168
215
  return state
169
216
 
170
217
  def from_mcmc_group(self, store: dict):
171
218
  """Extract posterior allocation samples from the MCMC sampler, attach them to the class.
172
219
 
173
- We have not implemented anything here as there is nothing to fetch from the MCMC solution here for the
174
- NullGrouping Class.
220
+ Gets the number of sources present in each iteration of the MCMC sampler, and attaches this as a class property.
175
221
 
176
222
  Args:
177
223
  store (dict): dictionary containing samples from the MCMC.
178
-
179
224
  """
225
+ self.number_on_sources = np.count_nonzero(np.logical_not(np.isnan(store[self.map["source"]])), axis=0)
180
226
 
181
227
 
182
228
  @dataclass
@@ -190,11 +236,13 @@ class SlabAndSpike(SourceGrouping):
190
236
  slab_probability (float): prior probability of allocation to the slab component. Defaults to 0.05.
191
237
  allocation (np.ndarray): set of allocation samples, with shape=(n_sources, n_iterations). Attached to
192
238
  the class by self.from_mcmc_group().
239
+ number_on_sources (np.ndarray): number of sources switched on in the solution, per iteration.
193
240
 
194
241
  """
195
242
 
196
243
  slab_probability: float = 0.05
197
244
  allocation: np.ndarray = field(init=False)
245
+ number_on_sources: np.ndarray = field(init=False)
198
246
 
199
247
  def make_allocation_model(self, model: list) -> list:
200
248
  """Initialise the source allocation part of the model.
@@ -206,7 +254,7 @@ class SlabAndSpike(SourceGrouping):
206
254
  list: overall model list, updated with allocation distribution.
207
255
 
208
256
  """
209
- model.append(Categorical("alloc_s", prob="s_prob"))
257
+ model.append(Categorical(self.map["allocation"], prob=self.map["source_prob"]))
210
258
  return model
211
259
 
212
260
  def make_allocation_sampler(self, model: Model, sampler_list: list) -> list:
@@ -220,7 +268,9 @@ class SlabAndSpike(SourceGrouping):
220
268
  list: sampler_list updated with sampler for the source allocation.
221
269
 
222
270
  """
223
- sampler_list.append(MixtureAllocation(param="alloc_s", model=model, response_param=self._source_key))
271
+ sampler_list.append(
272
+ MixtureAllocation(param=self.map["allocation"], model=model, response_param=self.map["source"])
273
+ )
224
274
  return sampler_list
225
275
 
226
276
  def make_allocation_state(self, state: dict) -> dict:
@@ -233,9 +283,11 @@ class SlabAndSpike(SourceGrouping):
233
283
  dict: state updated with parameters related to the source grouping.
234
284
 
235
285
  """
236
- state["mu_s"] = np.array(self.emission_rate_mean, ndmin=1)
237
- state["s_prob"] = np.tile(np.array([self.slab_probability, 1 - self.slab_probability]), (self.nof_sources, 1))
238
- state["alloc_s"] = np.ones((self.nof_sources, 1), dtype="int")
286
+ state[self.map["emission_rate_mean"]] = np.array(self.emission_rate_mean, ndmin=1)
287
+ state[self.map["source_prob"]] = np.tile(
288
+ np.array([self.slab_probability, 1 - self.slab_probability]), (self.nof_sources, 1)
289
+ )
290
+ state[self.map["allocation"]] = np.ones((self.nof_sources, 1), dtype="int")
239
291
  return state
240
292
 
241
293
  def from_mcmc_group(self, store: dict):
@@ -245,11 +297,12 @@ class SlabAndSpike(SourceGrouping):
245
297
  store (dict): dictionary containing samples from the MCMC.
246
298
 
247
299
  """
248
- self.allocation = store["alloc_s"]
300
+ self.allocation = store[self.map["allocation"]]
301
+ self.number_on_sources = self.allocation.shape[0] - np.sum(self.allocation, axis=0)
249
302
 
250
303
 
251
304
  @dataclass
252
- class SourceDistribution:
305
+ class SourceDistribution(ParameterMapping):
253
306
  """Superclass for source emission rate distribution.
254
307
 
255
308
  Source distribution determines the type of prior to be used for the source emission rates, and the transformation
@@ -349,9 +402,13 @@ class NormalResponse(SourceDistribution):
349
402
 
350
403
  model.append(
351
404
  mcmcNormal(
352
- "s",
353
- mean=parameter.MixtureParameterVector(param="mu_s", allocation="alloc_s"),
354
- precision=parameter.MixtureParameterMatrix(param="lambda_s", allocation="alloc_s"),
405
+ self.map["source"],
406
+ mean=parameter.MixtureParameterVector(
407
+ param=self.map["emission_rate_mean"], allocation=self.map["allocation"]
408
+ ),
409
+ precision=parameter.MixtureParameterMatrix(
410
+ param=self.map["emission_rate_precision"], allocation=self.map["allocation"]
411
+ ),
355
412
  domain_response_lower=domain_response_lower,
356
413
  )
357
414
  )
@@ -370,7 +427,7 @@ class NormalResponse(SourceDistribution):
370
427
  """
371
428
  if sampler_list is None:
372
429
  sampler_list = []
373
- sampler_list.append(NormalNormal("s", model))
430
+ sampler_list.append(NormalNormal(self.map["source"], model))
374
431
  return sampler_list
375
432
 
376
433
  def make_source_state(self, state: dict) -> dict:
@@ -383,7 +440,7 @@ class NormalResponse(SourceDistribution):
383
440
  dict: state updated with initial emission rate vector.
384
441
 
385
442
  """
386
- state["s"] = np.zeros((self.nof_sources, 1))
443
+ state[self.map["source"]] = np.zeros((self.nof_sources, 1))
387
444
  return state
388
445
 
389
446
  def from_mcmc_dist(self, store: dict):
@@ -393,7 +450,7 @@ class NormalResponse(SourceDistribution):
393
450
  store (dict): dictionary containing samples from the MCMC.
394
451
 
395
452
  """
396
- self.emission_rate = store["s"]
453
+ self.emission_rate = store[self.map["source"]]
397
454
 
398
455
 
399
456
  @dataclass
@@ -447,12 +504,19 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
447
504
  initial_precision (Union[float, np.ndarray]): initial value for the source emission rate precision parameter.
448
505
  precision_scalar (np.ndarray): precision values generated by MCMC inversion.
449
506
 
507
+ all_source_locations (ENU): ENU object containing the locations of the sources after the MCMC has been run, therefore in
508
+ the situation where the reversible_jump == True, this will be the final locations of the sources in the
509
+ solution over all iterations. For the case where reversible_jump == False, this will be the locations of
510
+ the sources in the source map and will not change during the course of the inversion.
511
+ individual_source_labels (list, optional): list of labels for each source in the source map, defaults to None.
512
+
450
513
  coverage_detection (float): sensor detection threshold (in ppm) to be used for coverage calculations.
451
514
  coverage_test_source (float): test source (in kg/hr) which we wish to be able to see in coverage calculation.
452
515
 
453
516
  threshold_function (Callable): Callable function which returns a single value that defines the threshold
454
517
  for the coupling in a lambda function form. Examples: lambda x: np.quantile(x, 0.95, axis=0),
455
518
  lambda x: np.max(x, axis=0), lambda x: np.mean(x, axis=0). Defaults to np.quantile.
519
+ label_string (str): string to append to the parameter mapping, e.g. for fixed sources, defaults to None.
456
520
 
457
521
  """
458
522
 
@@ -476,11 +540,26 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
476
540
  initial_precision: Union[float, np.ndarray] = 1.0
477
541
  precision_scalar: np.ndarray = field(init=False)
478
542
 
543
+ all_source_locations: np.ndarray = field(init=False)
544
+ individual_source_labels: Optional[list] = None
545
+
479
546
  coverage_detection: float = 0.1
480
547
  coverage_test_source: float = 6.0
481
548
 
482
549
  threshold_function: callable = lambda x: np.quantile(x, 0.95, axis=0)
483
550
 
551
+ label_string: Optional[str] = None
552
+
553
+ def __post_init__(self):
554
+ """Post-initialisation of the class.
555
+
556
+ This function is called after the class has been initialised,
557
+ and is used to set up the mapping dictionary for the class by applying the
558
+ append_string function to the mapping dictionary.
559
+ """
560
+ if self.label_string is not None:
561
+ self.append_string(self.label_string)
562
+
484
563
  @property
485
564
  def nof_sources(self):
486
565
  """Get number of sources in the source map."""
@@ -583,15 +662,17 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
583
662
  state (dict): state dictionary containing updated coupling information.
584
663
 
585
664
  """
586
- self.dispersion_model.source_map.location.from_array(state["z_src"][:, [update_column]].T)
665
+ self.dispersion_model.source_map.location.from_array(state[self.map["source_location"]][:, [update_column]].T)
587
666
  new_coupling = self.dispersion_model.compute_coupling(
588
667
  self.sensor_object, self.meteorology, self.gas_species, output_stacked=True, run_interpolation=False
589
668
  )
590
669
 
591
- if update_column == state["A"].shape[1]:
592
- state["A"] = np.concatenate((state["A"], new_coupling), axis=1)
593
- elif update_column < state["A"].shape[1]:
594
- state["A"][:, [update_column]] = new_coupling
670
+ if update_column == state[self.map["coupling_matrix"]].shape[1]:
671
+ state[self.map["coupling_matrix"]] = np.concatenate(
672
+ (state[self.map["coupling_matrix"]], new_coupling), axis=1
673
+ )
674
+ elif update_column < state[self.map["coupling_matrix"]].shape[1]:
675
+ state[self.map["coupling_matrix"]][:, [update_column]] = new_coupling
595
676
  else:
596
677
  raise ValueError("Invalid column specification for updating.")
597
678
  return state
@@ -629,10 +710,12 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
629
710
  (i.e. log[p(current | proposed)])
630
711
 
631
712
  """
632
- prop_state = self.update_coupling_column(prop_state, int(prop_state["n_src"]) - 1)
633
- prop_state["alloc_s"] = np.concatenate((prop_state["alloc_s"], np.array([0], ndmin=2)), axis=0)
713
+ prop_state = self.update_coupling_column(prop_state, int(prop_state[self.map["number_sources"]]) - 1)
714
+ prop_state[self.map["allocation"]] = np.concatenate(
715
+ (prop_state[self.map["allocation"]], np.array([0], ndmin=2)), axis=0
716
+ )
634
717
  in_cov_area = self.dispersion_model.compute_coverage(
635
- prop_state["A"][:, -1],
718
+ prop_state[self.map["coupling_matrix"]][:, -1],
636
719
  coverage_threshold=self.coverage_threshold,
637
720
  threshold_function=self.threshold_function,
638
721
  )
@@ -644,8 +727,7 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
644
727
 
645
728
  return prop_state, logp_pr_g_cr, logp_cr_g_pr
646
729
 
647
- @staticmethod
648
- def death_function(current_state: dict, prop_state: dict, deletion_index: int) -> Tuple[dict, float, float]:
730
+ def death_function(self, current_state: dict, prop_state: dict, deletion_index: int) -> Tuple[dict, float, float]:
649
731
  """Update MCMC state based on source death proposal.
650
732
 
651
733
  Proposed state updated as follows:
@@ -669,8 +751,10 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
669
751
  (i.e. log[p(current | proposed)])
670
752
 
671
753
  """
672
- prop_state["A"] = np.delete(prop_state["A"], obj=deletion_index, axis=1)
673
- prop_state["alloc_s"] = np.delete(prop_state["alloc_s"], obj=deletion_index, axis=0)
754
+ prop_state[self.map["coupling_matrix"]] = np.delete(
755
+ prop_state[self.map["coupling_matrix"]], obj=deletion_index, axis=1
756
+ )
757
+ prop_state[self.map["allocation"]] = np.delete(prop_state[self.map["allocation"]], obj=deletion_index, axis=0)
674
758
  logp_pr_g_cr = 0.0
675
759
  logp_cr_g_pr = 0.0
676
760
 
@@ -693,7 +777,7 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
693
777
  prop_state = deepcopy(current_state)
694
778
  prop_state = self.update_coupling_column(prop_state, update_column)
695
779
  in_cov_area = self.dispersion_model.compute_coverage(
696
- prop_state["A"][:, update_column],
780
+ prop_state[self.map["coupling_matrix"]][:, update_column],
697
781
  coverage_threshold=self.coverage_threshold,
698
782
  threshold_function=self.threshold_function,
699
783
  )
@@ -714,16 +798,22 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
714
798
  model = self.make_allocation_model(model)
715
799
  model = self.make_source_model(model)
716
800
  if self.update_precision:
717
- model.append(Gamma("lambda_s", shape="a_lam_s", rate="b_lam_s"))
801
+ model.append(
802
+ Gamma(
803
+ self.map["emission_rate_precision"],
804
+ shape=self.map["precision_prior_shape"],
805
+ rate=self.map["precision_prior_rate"],
806
+ )
807
+ )
718
808
  if self.reversible_jump:
719
809
  model.append(
720
810
  Uniform(
721
- response="z_src",
811
+ response=self.map["source_location"],
722
812
  domain_response_lower=self.site_limits[:, [0]],
723
813
  domain_response_upper=self.site_limits[:, [1]],
724
814
  )
725
815
  )
726
- model.append(Poisson(response="n_src", rate="rho"))
816
+ model.append(Poisson(response=self.map["number_sources"], rate=self.map["number_source_rate"]))
727
817
  return model
728
818
 
729
819
  def make_sampler(self, model: Model, sampler_list: list) -> list:
@@ -740,7 +830,7 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
740
830
  sampler_list = self.make_source_sampler(model, sampler_list)
741
831
  sampler_list = self.make_allocation_sampler(model, sampler_list)
742
832
  if self.update_precision:
743
- sampler_list.append(NormalGamma("lambda_s", model))
833
+ sampler_list.append(NormalGamma(self.map["emission_rate_precision"], model))
744
834
  if self.reversible_jump:
745
835
  sampler_list = self.make_sampler_rjmcmc(model, sampler_list)
746
836
  return sampler_list
@@ -757,15 +847,15 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
757
847
  """
758
848
  state = self.make_allocation_state(state)
759
849
  state = self.make_source_state(state)
760
- state["A"] = self.coupling
761
- state["lambda_s"] = np.array(self.initial_precision, ndmin=1)
850
+ state[self.map["coupling_matrix"]] = self.coupling
851
+ state[self.map["emission_rate_precision"]] = np.array(self.initial_precision, ndmin=1)
762
852
  if self.update_precision:
763
- state["a_lam_s"] = np.ones_like(self.initial_precision) * self.prior_precision_shape
764
- state["b_lam_s"] = np.ones_like(self.initial_precision) * self.prior_precision_rate
853
+ state[self.map["precision_prior_shape"]] = np.ones_like(self.initial_precision) * self.prior_precision_shape
854
+ state[self.map["precision_prior_rate"]] = np.ones_like(self.initial_precision) * self.prior_precision_rate
765
855
  if self.reversible_jump:
766
- state["z_src"] = self.dispersion_model.source_map.location.to_array().T
767
- state["n_src"] = state["z_src"].shape[1]
768
- state["rho"] = self.rate_num_sources
856
+ state[self.map["source_location"]] = self.dispersion_model.source_map.location.to_array().T
857
+ state[self.map["number_sources"]] = state[self.map["source_location"]].shape[1]
858
+ state[self.map["number_source_rate"]] = self.rate_num_sources
769
859
  return state
770
860
 
771
861
  def make_sampler_rjmcmc(self, model: Model, sampler_list: list) -> list:
@@ -785,11 +875,13 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
785
875
  sampler_list (list): list of samplers updated with samplers corresponding to RJMCMC routine.
786
876
 
787
877
  """
788
- sampler_list[-1].max_variable_size = self.n_sources_max
878
+ for sampler in sampler_list:
879
+ if sampler.param == self.map["source"]:
880
+ sampler.max_variable_size = self.n_sources_max
789
881
 
790
882
  sampler_list.append(
791
883
  RandomWalkLoop(
792
- "z_src",
884
+ self.map["source_location"],
793
885
  model,
794
886
  step=self.random_walk_step_size,
795
887
  max_variable_size=(3, self.n_sources_max),
@@ -797,13 +889,18 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
797
889
  state_update_function=self.move_function,
798
890
  )
799
891
  )
800
- matching_params = {"variable": "s", "matrix": "A", "scale": 1.0, "limits": [0.0, 1e6]}
892
+ matching_params = {
893
+ "variable": self.map["source"],
894
+ "matrix": self.map["coupling_matrix"],
895
+ "scale": 1.0,
896
+ "limits": [0.0, 1e6],
897
+ }
801
898
  sampler_list.append(
802
899
  ReversibleJump(
803
- "n_src",
900
+ self.map["number_sources"],
804
901
  model,
805
902
  step=np.array([1.0], ndmin=2),
806
- associated_params="z_src",
903
+ associated_params=self.map["source_location"],
807
904
  n_max=self.n_sources_max,
808
905
  state_birth_function=self.birth_function,
809
906
  state_death_function=self.death_function,
@@ -815,14 +912,41 @@ class SourceModel(Component, SourceGrouping, SourceDistribution):
815
912
  def from_mcmc(self, store: dict):
816
913
  """Extract results of mcmc from mcmc.store and attach to components.
817
914
 
915
+ For the reversible jump case we extract all estimated source locations
916
+ per iteration. For the fixed sources case we grab the source locations
917
+ from the inputted sourcemap and repeat those for all iterations.
918
+
818
919
  Args:
819
920
  store (dict): mcmc result dictionary.
820
921
 
821
922
  """
822
923
  self.from_mcmc_group(store)
823
924
  self.from_mcmc_dist(store)
925
+ if self.individual_source_labels is None:
926
+ self.individual_source_labels = list(np.repeat(None, store[self.map["source"]].shape[0]))
927
+
824
928
  if self.update_precision:
825
- self.precision_scalar = store["lambda_s"]
929
+ self.precision_scalar = store[self.map["emission_rate_precision"]]
930
+
931
+ if self.reversible_jump:
932
+ reference_latitude = self.dispersion_model.source_map.location.ref_latitude
933
+ reference_longitude = self.dispersion_model.source_map.location.ref_longitude
934
+ ref_altitude = self.dispersion_model.source_map.location.ref_altitude
935
+ self.all_source_locations = ENU(
936
+ ref_latitude=reference_latitude,
937
+ ref_longitude=reference_longitude,
938
+ ref_altitude=ref_altitude,
939
+ east=store[self.map["source_location"]][0, :, :],
940
+ north=store[self.map["source_location"]][1, :, :],
941
+ up=store[self.map["source_location"]][2, :, :],
942
+ )
943
+
944
+ else:
945
+ location_temp = self.dispersion_model.source_map.location.to_enu()
946
+ location_temp.east = np.repeat(location_temp.east[:, np.newaxis], store["log_post"].shape[0], axis=1)
947
+ location_temp.north = np.repeat(location_temp.north[:, np.newaxis], store["log_post"].shape[0], axis=1)
948
+ location_temp.up = np.repeat(location_temp.up[:, np.newaxis], store["log_post"].shape[0], axis=1)
949
+ self.all_source_locations = location_temp
826
950
 
827
951
  def plot_iterations(self, plot: "Plot", burn_in_value: int, y_axis_type: str = "linear") -> "Plot":
828
952
  """Plot the emission rate estimates source model object against MCMC iteration.
@@ -9,6 +9,7 @@ This module provides a class definition for the main functionalities of the code
9
9
  openMCMC repo and defining some plotting wrappers.
10
10
 
11
11
  """
12
+ import re
12
13
  import warnings
13
14
  from dataclasses import dataclass, field
14
15
  from typing import Union
@@ -23,6 +24,7 @@ from pyelq.component.background import Background, SpatioTemporalBackground
23
24
  from pyelq.component.error_model import BySensor, ErrorModel
24
25
  from pyelq.component.offset import PerSensor
25
26
  from pyelq.component.source_model import Normal, SourceModel
27
+ from pyelq.coordinate_system import ENU
26
28
  from pyelq.gas_species import GasSpecies
27
29
  from pyelq.meteorology import Meteorology, MeteorologyGroup
28
30
  from pyelq.plotting.plot import Plot
@@ -62,7 +64,7 @@ class ELQModel:
62
64
  meteorology: Union[Meteorology, MeteorologyGroup],
63
65
  gas_species: GasSpecies,
64
66
  background: Background = SpatioTemporalBackground(),
65
- source_model: SourceModel = Normal(),
67
+ source_model: Union[list, SourceModel] = Normal(),
66
68
  error_model: ErrorModel = BySensor(),
67
69
  offset_model: PerSensor = None,
68
70
  ):
@@ -82,7 +84,9 @@ class ELQModel:
82
84
  meteorology (Union[Meteorology, MeteorologyGroup]): meteorology data.
83
85
  gas_species (GasSpecies): gas species object.
84
86
  background (Background): background model specification. Defaults to SpatioTemporalBackground().
85
- source_model (SourceModel): source model specification. Defaults to Normal().
87
+ source_model (Union[list, SourceModel]): source model specification. This can be a list of multiple
88
+ SourceModels or a single SourceModel. Defaults to Normal(). If a single SourceModel is used, it will
89
+ be converted to a list.
86
90
  error_model (Precision): measurement precision model specification. Defaults to BySensor().
87
91
  offset_model (PerSensor): offset model specification. Defaults to None.
88
92
 
@@ -92,10 +96,19 @@ class ELQModel:
92
96
  self.gas_species = gas_species
93
97
  self.components = {
94
98
  "background": background,
95
- "source": source_model,
96
99
  "error_model": error_model,
97
100
  "offset": offset_model,
98
101
  }
102
+
103
+ if source_model is not None:
104
+ if not isinstance(source_model, list):
105
+ source_model = [source_model]
106
+ for source in source_model:
107
+ if source.label_string is None:
108
+ self.components["source"] = source
109
+ else:
110
+ self.components["source_" + source.label_string] = source
111
+
99
112
  if error_model is None:
100
113
  self.components["error_model"] = BySensor()
101
114
  warnings.warn("None is not an allowed type for error_model: resetting to default BySensor model.")
@@ -107,17 +120,19 @@ class ELQModel:
107
120
  """Take data inputs and extract relevant properties."""
108
121
  self.form = {}
109
122
  self.transform = {}
110
- component_keys = list(self.components.keys())
111
- if "background" in component_keys:
112
- self.form["bg"] = "B_bg"
113
- self.transform["bg"] = False
114
- if "source" in component_keys:
115
- self.transform["s"] = False
116
- self.form["s"] = "A"
117
- if "offset" in component_keys:
118
- self.form["d"] = "B_d"
119
- self.transform["d"] = False
120
- for key in component_keys:
123
+ for key, component in self.components.items():
124
+
125
+ if "background" in key:
126
+ self.form["bg"] = "B_bg"
127
+ self.transform["bg"] = False
128
+ if re.match("source", key):
129
+ source_component_map = component.map
130
+ self.transform[source_component_map["source"]] = False
131
+ self.form[source_component_map["source"]] = source_component_map["coupling_matrix"]
132
+ if "offset" in key:
133
+ self.form["d"] = "B_d"
134
+ self.transform["d"] = False
135
+
121
136
  self.components[key].initialise(self.sensor_object, self.meteorology, self.gas_species)
122
137
 
123
138
  def to_mcmc(self):
@@ -175,6 +190,109 @@ class ELQModel:
175
190
  for key in self.mcmc.store:
176
191
  state[key] = self.mcmc.store[key]
177
192
 
193
+ self.make_combined_source_model()
194
+
195
+ def make_combined_source_model(self):
196
+ """Aggregate multiple individual source models into a single combined source model.
197
+
198
+ This function iterates through the existing source models stored in `self.components` and consolidates them
199
+ into a unified source model named `"sources_combined"`. This is particularly useful when multiple source
200
+ models are involved in an analysis, and a merged representation is required for visualization.
201
+
202
+ The combined source model is created as an instance of the `Normal` model, with the label string
203
+ "sources_combined" with the following attributes:
204
+ - emission_rate: concatenated across all source models.
205
+ - all_source_locations: concatenated across all source models.
206
+ - number_on_sources: derived by summing the individual source counts across all source models
207
+ - label_string: concatenated across all source models.
208
+ - individual_source_labels: concatenated across all source models.
209
+
210
+ Once combined, the `"sources_combined"` model is stored in the `self.components` dictionary for later use.
211
+
212
+ Raises:
213
+ ValueError: If the reference locations of the individual source models are inconsistent.
214
+ This is checked by comparing the reference latitude, longitude, and altitude of each source model.
215
+
216
+ """
217
+ combined_model = Normal(label_string="sources_combined")
218
+ emission_rate = np.empty((0, self.mcmc.n_iter))
219
+ all_source_locations_east = np.empty((0, self.mcmc.n_iter))
220
+ all_source_locations_north = np.empty((0, self.mcmc.n_iter))
221
+ all_source_locations_up = np.empty((0, self.mcmc.n_iter))
222
+ number_on_sources = np.empty((0, self.mcmc.n_iter))
223
+ label_string = []
224
+ individual_source_labels = []
225
+
226
+ ref_latitude = None
227
+ ref_longitude = None
228
+ ref_altitude = None
229
+ for key, component in self.components.items():
230
+ if re.match("source", key):
231
+ comp_ref_latitude = component.all_source_locations.ref_latitude
232
+ comp_ref_longitude = component.all_source_locations.ref_longitude
233
+ comp_ref_altitude = component.all_source_locations.ref_altitude
234
+ if ref_latitude is None and ref_longitude is None and ref_altitude is None:
235
+ ref_latitude = comp_ref_latitude
236
+ ref_longitude = comp_ref_longitude
237
+ ref_altitude = comp_ref_altitude
238
+ else:
239
+ if (
240
+ not np.isclose(ref_latitude, comp_ref_latitude)
241
+ or not np.isclose(ref_longitude, comp_ref_longitude)
242
+ or not np.isclose(ref_altitude, comp_ref_altitude)
243
+ ):
244
+ raise ValueError(
245
+ f"Inconsistent reference locations in component '{key}'. "
246
+ "All source models must share the same reference location."
247
+ )
248
+ emission_rate = np.concatenate((emission_rate, component.emission_rate))
249
+ number_on_sources = np.concatenate(
250
+ (
251
+ number_on_sources.reshape((-1, self.mcmc.n_iter)),
252
+ component.number_on_sources.reshape(-1, self.mcmc.n_iter),
253
+ ),
254
+ axis=0,
255
+ )
256
+ label_string.append(component.label_string)
257
+ individual_source_labels.append(component.individual_source_labels)
258
+
259
+ all_source_locations_east = np.concatenate(
260
+ (
261
+ all_source_locations_east,
262
+ component.all_source_locations.east.reshape((-1, self.mcmc.n_iter)),
263
+ ),
264
+ axis=0,
265
+ )
266
+ all_source_locations_north = np.concatenate(
267
+ (
268
+ all_source_locations_north,
269
+ component.all_source_locations.north.reshape((-1, self.mcmc.n_iter)),
270
+ ),
271
+ axis=0,
272
+ )
273
+ all_source_locations_up = np.concatenate(
274
+ (
275
+ all_source_locations_up,
276
+ component.all_source_locations.up.reshape((-1, self.mcmc.n_iter)),
277
+ ),
278
+ axis=0,
279
+ )
280
+
281
+ combined_model.all_source_locations = ENU(
282
+ ref_altitude=ref_altitude,
283
+ ref_latitude=ref_latitude,
284
+ ref_longitude=ref_longitude,
285
+ east=all_source_locations_east,
286
+ north=all_source_locations_north,
287
+ up=all_source_locations_up,
288
+ )
289
+
290
+ combined_model.emission_rate = emission_rate
291
+ combined_model.label_string = label_string
292
+ combined_model.number_on_sources = np.sum(number_on_sources, axis=0)
293
+ combined_model.individual_source_labels = [item for sublist in individual_source_labels for item in sublist]
294
+ self.components["sources_combined"] = combined_model
295
+
178
296
  def plot_log_posterior(self, burn_in_value: int, plot: Plot = Plot()) -> Plot():
179
297
  """Plots the trace of the log posterior over the iterations of the MCMC.
180
298
 
@@ -9,6 +9,7 @@ Large module containing all the plotting code used to create various plots. Cont
9
9
  definition.
10
10
 
11
11
  """
12
+ import re
12
13
  import warnings
13
14
  from copy import deepcopy
14
15
  from dataclasses import dataclass, field
@@ -16,6 +17,7 @@ from typing import TYPE_CHECKING, Any, Callable, Type, Union
16
17
 
17
18
  import numpy as np
18
19
  import pandas as pd
20
+ import plotly.express as px
19
21
  import plotly.figure_factory as ff
20
22
  import plotly.graph_objects as go
21
23
  from geojson import Feature, FeatureCollection
@@ -167,14 +169,7 @@ def create_trace_specifics(object_to_plot: Union[Type[SlabAndSpike], SourceModel
167
169
  title_text = "Number of Sources 'on' against MCMC iterations"
168
170
  x_label = MCMC_ITERATION_NUMBER_LITERAL
169
171
  y_label = "Number of Sources 'on'"
170
- emission_rates = object_to_plot.emission_rate
171
- if isinstance(object_to_plot, SlabAndSpike):
172
- total_nof_sources = emission_rates.shape[0]
173
- y_values = total_nof_sources - np.sum(object_to_plot.allocation, axis=0)
174
- elif object_to_plot.reversible_jump:
175
- y_values = np.count_nonzero(np.logical_not(np.isnan(emission_rates)), axis=0)
176
- else:
177
- raise TypeError("No plotting routine implemented for this SourceModel type.")
172
+ y_values = object_to_plot.number_on_sources
178
173
  x_values = np.array(range(y_values.size))
179
174
  color = "rgb(248, 156, 116)"
180
175
  name = "Number of Sources 'on'"
@@ -836,13 +831,17 @@ class Plot:
836
831
 
837
832
  for source_idx in range(source_model_object.emission_rate.shape[0]):
838
833
  y_values = source_model_object.emission_rate[source_idx, :]
834
+ if source_model_object.individual_source_labels[source_idx] is not None:
835
+ source_label = source_model_object.individual_source_labels[source_idx]
836
+ else:
837
+ source_label = f"Source {source_idx}"
839
838
 
840
839
  fig = plot_single_scatter(
841
840
  fig=fig,
842
841
  x_values=x_values,
843
842
  y_values=y_values,
844
843
  color=RGB_LIGHT_BLUE,
845
- name=f"Source {source_idx}",
844
+ name=source_label,
846
845
  burn_in=burn_in,
847
846
  show_legend=False,
848
847
  legend_group="Source traces",
@@ -973,20 +972,24 @@ class Plot:
973
972
  def plot_quantification_results_on_map(
974
973
  self,
975
974
  model_object: "ELQModel",
975
+ source_model_to_plot_key: str = None,
976
976
  bin_size_x: float = 1,
977
977
  bin_size_y: float = 1,
978
978
  normalized_count_limit: float = 0.005,
979
979
  burn_in: int = 0,
980
980
  show_summary_results: bool = True,
981
+ show_fixed_source_locations: bool = True,
981
982
  ):
982
983
  """Function to create a map with the quantification results of the model object.
983
984
 
984
- This function takes the ELQModel object and calculates the statistics for the quantification results. It then
985
- populates the figure dictionary with three different maps showing the normalized count, median emission rate
986
- and the inter-quartile range of the emission rate estimates.
985
+ This function takes the "SourceModel" object and calculates the statistics for the quantification results.
986
+ It then populates the figure dictionary with three different maps showing the normalized count,
987
+ median emission rate and the inter-quartile range of the emission rate estimates.
987
988
 
988
989
  Args:
989
990
  model_object (ELQModel): ELQModel object containing the quantification results
991
+ source_model_to_plot_key (str, optional): Key to use in the model_object.components dictionary to access
992
+ the SourceModel object. If None, defaults to "sources_combined".
990
993
  bin_size_x (float, optional): Size of the bins in the x-direction. Defaults to 1.
991
994
  bin_size_y (float, optional): Size of the bins in the y-direction. Defaults to 1.
992
995
  normalized_count_limit (float, optional): Limit for the normalized count to show on the map.
@@ -994,18 +997,30 @@ class Plot:
994
997
  burn_in (int, optional): Number of burn-in iterations to discard before calculating the statistics.
995
998
  Defaults to 0.
996
999
  show_summary_results (bool, optional): Flag to show the summary results on the map. Defaults to True.
1000
+ show_fixed_source_locations (bool, optional): Flag to show the fixed source locations when present in one
1001
+ of the source maps. Defaults to True.
997
1002
 
998
1003
  """
999
- ref_latitude = model_object.components["source"].dispersion_model.source_map.location.ref_latitude
1000
- ref_longitude = model_object.components["source"].dispersion_model.source_map.location.ref_longitude
1001
- ref_altitude = model_object.components["source"].dispersion_model.source_map.location.ref_altitude
1004
+ if source_model_to_plot_key is None:
1005
+ source_model_to_plot_key = "sources_combined"
1006
+
1007
+ source_model = model_object.components[source_model_to_plot_key]
1008
+ sensor_object = model_object.sensor_object
1009
+
1010
+ source_locations = source_model.all_source_locations
1011
+ emission_rates = source_model.emission_rate
1002
1012
 
1003
- datetime_min_string = model_object.sensor_object.time.min().strftime("%d-%b-%Y, %H:%M:%S")
1004
- datetime_max_string = model_object.sensor_object.time.max().strftime("%d-%b-%Y, %H:%M:%S")
1013
+ ref_latitude = source_locations.ref_latitude
1014
+ ref_longitude = source_locations.ref_longitude
1015
+ ref_altitude = source_locations.ref_altitude
1016
+
1017
+ datetime_min_string = sensor_object.time.min().strftime("%d-%b-%Y, %H:%M:%S")
1018
+ datetime_max_string = sensor_object.time.max().strftime("%d-%b-%Y, %H:%M:%S")
1005
1019
 
1006
1020
  result_weighted, _, normalized_count, count_boolean, enu_points, summary_result = (
1007
1021
  calculate_rectangular_statistics(
1008
- model_object=model_object,
1022
+ emission_rates=emission_rates,
1023
+ source_locations=source_locations,
1009
1024
  bin_size_x=bin_size_x,
1010
1025
  bin_size_y=bin_size_y,
1011
1026
  burn_in=burn_in,
@@ -1043,7 +1058,7 @@ class Plot:
1043
1058
  font_family="Futura",
1044
1059
  font_size=15,
1045
1060
  )
1046
- model_object.sensor_object.plot_sensor_location(self.figure_dict["count_map"])
1061
+ sensor_object.plot_sensor_location(self.figure_dict["count_map"])
1047
1062
  self.figure_dict["count_map"].update_traces(showlegend=False)
1048
1063
 
1049
1064
  adjusted_result_weights = result_weighted.copy()
@@ -1069,7 +1084,7 @@ class Plot:
1069
1084
  font_family="Futura",
1070
1085
  font_size=15,
1071
1086
  )
1072
- model_object.sensor_object.plot_sensor_location(self.figure_dict["median_map"])
1087
+ sensor_object.plot_sensor_location(self.figure_dict["median_map"])
1073
1088
  self.figure_dict["median_map"].update_traces(showlegend=False)
1074
1089
 
1075
1090
  iqr_of_all_emissions = np.nanquantile(a=adjusted_result_weights, q=0.75, axis=2) - np.nanquantile(
@@ -1094,9 +1109,28 @@ class Plot:
1094
1109
  font_family="Futura",
1095
1110
  font_size=15,
1096
1111
  )
1097
- model_object.sensor_object.plot_sensor_location(self.figure_dict["iqr_map"])
1112
+ sensor_object.plot_sensor_location(self.figure_dict["iqr_map"])
1098
1113
  self.figure_dict["iqr_map"].update_traces(showlegend=False)
1099
1114
 
1115
+ if show_fixed_source_locations:
1116
+ for key, _ in model_object.components.items():
1117
+ if bool(re.search("fixed", key)):
1118
+ source_model_fixed = model_object.components[key]
1119
+ source_locations_fixed = source_model_fixed.all_source_locations
1120
+ source_location_fixed_lla = source_locations_fixed.to_lla()
1121
+ sources_lat = source_location_fixed_lla.latitude[:, 0]
1122
+ sources_lon = source_location_fixed_lla.longitude[:, 0]
1123
+ fixed_source_location_trace = go.Scattermap(
1124
+ mode="markers",
1125
+ lon=sources_lon,
1126
+ lat=sources_lat,
1127
+ name=f"Fixed source locations, {key}",
1128
+ marker={"size": 10, "opacity": 0.8},
1129
+ )
1130
+ self.figure_dict["count_map"].add_trace(fixed_source_location_trace)
1131
+ self.figure_dict["median_map"].add_trace(fixed_source_location_trace)
1132
+ self.figure_dict["iqr_map"].add_trace(fixed_source_location_trace)
1133
+
1100
1134
  if show_summary_results:
1101
1135
  self.figure_dict["count_map"].add_trace(summary_trace)
1102
1136
  self.figure_dict["count_map"].update_traces(showlegend=True)
@@ -52,7 +52,8 @@ def is_regularly_spaced(array: np.ndarray, tolerance: float = 0.01, return_delta
52
52
 
53
53
 
54
54
  def calculate_rectangular_statistics(
55
- model_object: "ELQModel",
55
+ emission_rates: np.ndarray,
56
+ source_locations: ENU,
56
57
  bin_size_x: float = 1,
57
58
  bin_size_y: float = 1,
58
59
  burn_in: int = 0,
@@ -70,7 +71,10 @@ def calculate_rectangular_statistics(
70
71
  likelihood of the blob.
71
72
 
72
73
  Args:
73
- model_object (ELQModel): ELQModel object containing the results of the MCMC run.
74
+ emission_rates (np.ndarray): and array of shape (number_of_sources, number_of_iterations)
75
+ containing emission rate estimates from the MCMC run.
76
+ source_locations (ENU): An object containing the east, north, and up coordinates of source locations,
77
+ as well as reference latitude, longitude, and altitude.
74
78
  bin_size_x (float, optional): Size of the bins in the x-direction. Defaults to 1.
75
79
  bin_size_y (float, optional): Size of the bins in the y-direction. Defaults to 1.
76
80
  burn_in (int, optional): Number of burn-in iterations used in the MCMC. Defaults to 0.
@@ -85,24 +89,9 @@ def calculate_rectangular_statistics(
85
89
  summary_result (pd.DataFrame): Summary statistics for each blob of estimates.
86
90
 
87
91
  """
88
- nof_iterations = model_object.n_iter
89
- ref_latitude = model_object.components["source"].dispersion_model.source_map.location.ref_latitude
90
- ref_longitude = model_object.components["source"].dispersion_model.source_map.location.ref_longitude
91
- ref_altitude = model_object.components["source"].dispersion_model.source_map.location.ref_altitude
92
-
93
- if model_object.components["source"].reversible_jump:
94
- all_source_locations = model_object.mcmc.store["z_src"]
95
- else:
96
- source_locations = (
97
- model_object.components["source"]
98
- .dispersion_model.source_map.location.to_enu(
99
- ref_longitude=ref_longitude, ref_latitude=ref_latitude, ref_altitude=ref_altitude
100
- )
101
- .to_array()
102
- )
103
- all_source_locations = np.repeat(source_locations.T[:, :, np.newaxis], model_object.mcmc.n_iter, axis=2)
92
+ nof_iterations = emission_rates.shape[1]
104
93
 
105
- if np.all(np.isnan(all_source_locations[:2, :, :])):
94
+ if np.all(np.isnan(source_locations.east)):
106
95
  warnings.warn("No sources found")
107
96
  result_weighted = np.array([[[np.nan]]])
108
97
  overall_count = np.array([[0]])
@@ -113,10 +102,10 @@ def calculate_rectangular_statistics(
113
102
 
114
103
  return result_weighted, overall_count, normalized_count, count_boolean, edges_result[:2], summary_result
115
104
 
116
- min_x = np.nanmin(all_source_locations[0, :, :])
117
- max_x = np.nanmax(all_source_locations[0, :, :])
118
- min_y = np.nanmin(all_source_locations[1, :, :])
119
- max_y = np.nanmax(all_source_locations[1, :, :])
105
+ min_x = np.nanmin(source_locations.east)
106
+ max_x = np.nanmax(source_locations.east)
107
+ min_y = np.nanmin(source_locations.north)
108
+ max_y = np.nanmax(source_locations.north)
120
109
 
121
110
  bin_min_x = np.floor(min_x - 0.1)
122
111
  bin_max_x = np.ceil(max_x + 0.1)
@@ -125,19 +114,20 @@ def calculate_rectangular_statistics(
125
114
  bin_min_iteration = burn_in + 0.5
126
115
  bin_max_iteration = nof_iterations + 0.5
127
116
 
128
- max_nof_sources = all_source_locations.shape[1]
117
+ max_nof_sources = source_locations.east.shape[0]
129
118
 
130
119
  x_edges = np.arange(start=bin_min_x, stop=bin_max_x + bin_size_x, step=bin_size_x)
131
120
  y_edges = np.arange(start=bin_min_y, stop=bin_max_y + bin_size_y, step=bin_size_y)
132
121
  iteration_edges = np.arange(start=bin_min_iteration, stop=bin_max_iteration + bin_size_y, step=1)
133
122
 
134
- result_x_vals = all_source_locations[0, :, :].flatten()
135
- result_y_vals = all_source_locations[1, :, :].flatten()
136
- result_z_vals = all_source_locations[2, :, :].flatten()
123
+ result_x_vals = source_locations.east.flatten()
124
+ result_y_vals = source_locations.north.flatten()
125
+ result_z_vals = source_locations.up.flatten()
137
126
 
138
127
  result_iteration_vals = np.array(range(nof_iterations)).reshape(1, -1) + 1
139
128
  result_iteration_vals = np.tile(result_iteration_vals, (max_nof_sources, 1)).flatten()
140
- results_estimates = model_object.mcmc.store["s"].flatten()
129
+
130
+ results_estimates = emission_rates.flatten()
141
131
 
142
132
  result_weighted, _ = np.histogramdd(
143
133
  sample=np.array([result_x_vals, result_y_vals, result_iteration_vals]).T,
@@ -167,11 +157,10 @@ def calculate_rectangular_statistics(
167
157
  x_edges=x_edges,
168
158
  y_edges=y_edges,
169
159
  nof_iterations=nof_iterations,
170
- ref_latitude=ref_latitude,
171
- ref_longitude=ref_longitude,
172
- ref_altitude=ref_altitude,
160
+ ref_latitude=source_locations.ref_latitude,
161
+ ref_longitude=source_locations.ref_longitude,
162
+ ref_altitude=source_locations.ref_altitude,
173
163
  )
174
-
175
164
  return result_weighted, overall_count, normalized_count, count_boolean, edges_result[:2], summary_result
176
165
 
177
166
 
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes