hestia-earth-models 0.61.7__py3-none-any.whl → 0.61.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of hestia-earth-models might be problematic.

Files changed (43)
  1. hestia_earth/models/cycle/completeness/electricityFuel.py +56 -0
  2. hestia_earth/models/emepEea2019/nh3ToAirInorganicFertiliser.py +44 -59
  3. hestia_earth/models/geospatialDatabase/histosol.py +4 -0
  4. hestia_earth/models/ipcc2006/co2ToAirOrganicSoilCultivation.py +4 -2
  5. hestia_earth/models/ipcc2006/n2OToAirOrganicSoilCultivationDirect.py +1 -1
  6. hestia_earth/models/ipcc2019/aboveGroundCropResidueTotal.py +1 -1
  7. hestia_earth/models/ipcc2019/belowGroundCropResidue.py +1 -1
  8. hestia_earth/models/ipcc2019/ch4ToAirExcreta.py +1 -1
  9. hestia_earth/models/ipcc2019/co2ToAirSoilOrganicCarbonStockChangeManagementChange.py +511 -458
  10. hestia_earth/models/ipcc2019/co2ToAirUreaHydrolysis.py +5 -1
  11. hestia_earth/models/ipcc2019/organicCarbonPerHa.py +117 -3881
  12. hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_1_utils.py +2060 -0
  13. hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_2_utils.py +1630 -0
  14. hestia_earth/models/ipcc2019/organicCarbonPerHa_utils.py +324 -0
  15. hestia_earth/models/mocking/search-results.json +252 -252
  16. hestia_earth/models/site/organicCarbonPerHa.py +58 -44
  17. hestia_earth/models/site/soilMeasurement.py +18 -13
  18. hestia_earth/models/utils/__init__.py +28 -0
  19. hestia_earth/models/utils/array_builders.py +578 -0
  20. hestia_earth/models/utils/blank_node.py +2 -3
  21. hestia_earth/models/utils/descriptive_stats.py +285 -0
  22. hestia_earth/models/utils/emission.py +73 -2
  23. hestia_earth/models/utils/inorganicFertiliser.py +2 -2
  24. hestia_earth/models/utils/measurement.py +118 -4
  25. hestia_earth/models/version.py +1 -1
  26. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.61.8.dist-info}/METADATA +1 -1
  27. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.61.8.dist-info}/RECORD +43 -31
  28. tests/models/cycle/completeness/test_electricityFuel.py +21 -0
  29. tests/models/emepEea2019/test_nh3ToAirInorganicFertiliser.py +2 -2
  30. tests/models/ipcc2019/test_co2ToAirSoilOrganicCarbonStockChangeManagementChange.py +54 -165
  31. tests/models/ipcc2019/test_organicCarbonPerHa.py +219 -460
  32. tests/models/ipcc2019/test_organicCarbonPerHa_tier_1_utils.py +471 -0
  33. tests/models/ipcc2019/test_organicCarbonPerHa_tier_2_utils.py +208 -0
  34. tests/models/ipcc2019/test_organicCarbonPerHa_utils.py +75 -0
  35. tests/models/site/test_organicCarbonPerHa.py +3 -12
  36. tests/models/site/test_soilMeasurement.py +3 -18
  37. tests/models/utils/test_array_builders.py +253 -0
  38. tests/models/utils/test_descriptive_stats.py +134 -0
  39. tests/models/utils/test_emission.py +51 -1
  40. tests/models/utils/test_measurement.py +54 -2
  41. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.61.8.dist-info}/LICENSE +0 -0
  42. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.61.8.dist-info}/WHEEL +0 -0
  43. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.61.8.dist-info}/top_level.txt +0 -0
hestia_earth/models/utils/descriptive_stats.py
@@ -0,0 +1,285 @@
+ from collections.abc import Iterable
+ from enum import Enum
+ from functools import reduce
+ from numpy import abs, around, exp, float64, inf, pi, sign, sqrt
+ from numpy.typing import NDArray
+ from typing import Optional, Union
+
+
+ def calc_z_critical(
+     confidence_interval: float,
+     n_sided: int = 2
+ ) -> float64:
+     """
+     Calculate the z-critical value from the confidence interval.
+
+     Parameters
+     ----------
+     confidence_interval : float
+         The confidence interval as a percentage between 0 and 100%.
+     n_sided : int, optional
+         The number of tails (default value = `2`).
+
+     Returns
+     -------
+     float64
+         The z-critical value as a floating point between 0 and infinity.
+     """
+     alpha = 1 - confidence_interval / 100
+     return _normal_ppf(1 - alpha / n_sided)
+
+
+ def _normal_ppf(q: float64, tol: float64 = 1e-10) -> float64:
+     """
+     Calculates the percent point function (PPF), also known as the inverse cumulative distribution function (CDF), of a
+     standard normal distribution using the Newton-Raphson method.
+
+     Parameters
+     ----------
+     q : float64
+         The quantile at which to evaluate the PPF.
+     tol : float64, optional
+         The tolerance for the Newton-Raphson method. Defaults to 1e-10.
+
+     Returns
+     -------
+     float64
+         The PPF value at the given quantile.
+     """
+     INITIAL_GUESS = 0
+     MAX_ITER = 100
+
+     def step(x):
+         """Perform one step of the Newton-Raphson method."""
+         x_new = x - (_normal_cdf(x) - q) / _normal_pdf(x)
+         return x_new if abs(x_new - x) >= tol else x
+
+     return (
+         inf if q == 1 else
+         -inf if q == 0 else
+         reduce(lambda x, _: step(x), range(MAX_ITER), INITIAL_GUESS)
+     )
+
+
+ def _normal_cdf(x: float64) -> float64:
+     """
+     Calculates the cumulative distribution function (CDF) of a standard normal distribution for a single value using a
+     custom error function (erf).
+
+     Parameters
+     ----------
+     x : float64
+         The point at which to evaluate the CDF.
+
+     Returns
+     -------
+     float64
+         The CDF value at the given point.
+     """
+     return 0.5 * (1 + _erf(x / sqrt(2)))
+
+
+ def _erf(x: float64) -> float64:
+     """
+     Approximates the error function of a standard normal distribution using a numerical approximation based on
+     Abramowitz and Stegun formula 7.1.26.
+
+     Parameters
+     ----------
+     x : float64
+         The input value.
+
+     Returns
+     -------
+     float64
+         The approximated value of the error function.
+     """
+     # constants
+     A_1 = 0.254829592
+     A_2 = -0.284496736
+     A_3 = 1.421413741
+     A_4 = -1.453152027
+     A_5 = 1.061405429
+     P = 0.3275911
+
+     # Save the sign of x
+     sign_ = sign(x)
+     x_ = abs(x)
+
+     # A&S formula 7.1.26
+     t = 1.0 / (1.0 + P * x_)
+     y = 1.0 - (((((A_5 * t + A_4) * t) + A_3) * t + A_2) * t + A_1) * t * exp(-x_ * x_)
+
+     return sign_ * y
+
+
+ def _normal_pdf(x: float64) -> float64:
+     """
+     Calculates the probability density function (PDF) of a standard normal distribution for a single value.
+
+     Parameters
+     ----------
+     x : float64
+         The point at which to evaluate the PDF.
+
+     Returns
+     -------
+     float64
+         The PDF value at the given point.
+     """
+     return 1 / sqrt(2 * pi) * exp(-0.5 * x**2)
+
+
+ def _calc_confidence_level(
+     z_critical: float64,
+     n_sided: int = 2
+ ) -> float64:
+     """
+     Calculate the confidence level from the z-critical value.
+
+     Parameters
+     ----------
+     z_critical : float64
+         The z-critical value as a floating point number between 0 and infinity.
+     n_sided : int, optional
+         The number of tails (default value = `2`).
+
+     Returns
+     -------
+     float64
+         The confidence level as a percentage between 0 and 100%.
+     """
+     alpha = (1 - _normal_cdf(z_critical)) * n_sided
+     return (1 - alpha) * 100
+
+
+ def calc_required_iterations_monte_carlo(
+     confidence_level: float,
+     precision: float,
+     sd: float
+ ) -> int:
+     """
+     Calculate the number of iterations required for a Monte Carlo simulation to have a desired precision, subject to a
+     given confidence level.
+
+     Parameters
+     ----------
+     confidence_level : float
+         The confidence level, as a percentage out of 100, that the precision should be subject to (i.e., we are x%
+         sure that the sample mean deviates from the true population mean by less than the desired precision).
+     precision : float
+         The desired precision as a floating point value (i.e., if the Monte Carlo simulation will be used to estimate
+         `organicCarbonPerHa` to a precision of 100 kg C ha-1 this value should be 100).
+     sd : float
+         The standard deviation of the sample. This can be estimated by running the model 500 times (a number that does
+         not take too much time to run but is large enough for the sample standard deviation to converge reasonably
+         well).
+
+     Returns
+     -------
+     int
+         The required number of iterations.
+     """
+     z_critical_value = calc_z_critical(confidence_level)
+     return round(((sd * z_critical_value) / precision) ** 2)
+
+
+ def calc_confidence_level_monte_carlo(
+     n_iterations: int,
+     precision: float,
+     sd: float
+ ) -> float:
+     """
+     Calculate the confidence level that the sample mean calculated by the Monte Carlo simulation deviates from the
+     true population mean by less than the desired precision.
+
+     Parameters
+     ----------
+     n_iterations : int
+         The number of iterations that the Monte Carlo simulation was run for.
+     precision : float
+         The desired precision as a floating point value (i.e., if the Monte Carlo simulation will be used to estimate
+         `organicCarbonPerHa` to a precision of 100 kg C ha-1 this value should be 100).
+     sd : float
+         The standard deviation of the sample.
+
+     Returns
+     -------
+     float
+         The confidence level, as a percentage out of 100, that the precision should be subject to (i.e., we are x%
+         sure that the sample mean deviates from the true population mean by less than the desired precision).
+     """
+     return _calc_confidence_level(precision*sqrt(n_iterations)/sd)
+
+
+ def calc_precision_monte_carlo(
+     confidence_level: float,
+     n_iterations: int,
+     sd: float
+ ) -> float:
+     """
+     Calculate the +/- precision of a Monte Carlo simulation for a desired confidence level.
+
+     Parameters
+     ----------
+     confidence_level : float
+         The confidence level, as a percentage out of 100, that the precision should be subject to (i.e., we are x%
+         sure that the sample mean deviates from the true population mean by less than the desired precision).
+     n_iterations : int
+         The number of iterations that the Monte Carlo simulation was run for.
+     sd : float
+         The standard deviation of the sample.
+
+     Returns
+     -------
+     float
+         The precision of the sample mean estimated by the Monte Carlo model as a floating point value with the same
+         units as the estimated mean.
+     """
+     z_critical = calc_z_critical(confidence_level)
+     return (sd*z_critical)/sqrt(n_iterations)
+
+
+ def calc_descriptive_stats(
+     arr: NDArray,
+     stats_definition: Union[Enum, str],
+     axis: Optional[int] = None,
+     decimals: int = 6
+ ) -> dict:
+     """
+     Calculate the descriptive stats for an array along the specified axis, round them to a specified number of
+     decimal places and return them formatted for a HESTIA node.
+
+     Parameters
+     ----------
+     arr : NDArray
+     stats_definition : Enum | str
+     axis : int | None
+     decimals : int
+
+     Returns
+     -------
+     dict
+         The descriptive stats formatted for a HESTIA node, with the keys `value`, `sd`, `min`, `max`,
+         `statsDefinition` and `observations`.
+     """
+     value = around(arr.mean(axis=axis), decimals)
+     sd = around(arr.std(axis=axis), decimals)
+     min_ = around(arr.min(axis=axis), decimals)
+     max_ = around(arr.max(axis=axis), decimals)
+
+     rows, columns = arr.shape
+     observations = (
+         [rows] * columns if axis == 0
+         else [columns] * rows if axis == 1
+         else [arr.size]
+     )
+
+     return {
+         "value": list(value) if isinstance(value, Iterable) else [value],
+         "sd": list(sd) if isinstance(sd, Iterable) else [sd],
+         "min": list(min_) if isinstance(min_, Iterable) else [min_],
+         "max": list(max_) if isinstance(max_, Iterable) else [max_],
+         "statsDefinition": stats_definition.value if isinstance(stats_definition, Enum) else stats_definition,
+         "observations": observations
+     }
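
The new `descriptive_stats` utilities size a Monte Carlo run and summarise its samples for a HESTIA node. Below is a minimal usage sketch, not taken from the package: the array shapes, seed values and the `"simulated"` statsDefinition string are illustrative assumptions.

```python
import numpy as np

from hestia_earth.models.utils.descriptive_stats import (
    calc_descriptive_stats,
    calc_precision_monte_carlo,
    calc_required_iterations_monte_carlo,
    calc_z_critical
)

# z-critical for a two-sided 95% confidence interval, approx. 1.96
z = calc_z_critical(95)

# Pilot run: estimate the sample standard deviation from ~500 iterations, then size the
# full run so the sample mean lies within +/- 100 kg C ha-1 of the population mean
# at 95% confidence.
pilot = np.random.default_rng(0).normal(loc=60_000, scale=1_000, size=(500, 1))
sd = float(pilot.std())
n_iterations = calc_required_iterations_monte_carlo(95, 100, sd)  # approx. (sd * 1.96 / 100) ** 2

# Full run: summarise the samples in the HESTIA node format
samples = np.random.default_rng(1).normal(loc=60_000, scale=1_000, size=(n_iterations, 1))
stats = calc_descriptive_stats(samples, "simulated", axis=0)
precision = calc_precision_monte_carlo(95, n_iterations, sd)  # half-width of the interval around the mean
print(stats["value"], stats["sd"], stats["observations"], precision)
```

With `axis=0` the stats are computed per column and `observations` records the number of iterations per column; with `axis=None` the whole array is summarised as a single value.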
hestia_earth/models/utils/emission.py
@@ -1,13 +1,19 @@
- from hestia_earth.schema import SchemaType
+ from collections.abc import Iterable
+ from typing import Optional, Union
+
+ from hestia_earth.schema import EmissionMethodTier, SchemaType
  from hestia_earth.utils.api import download_hestia
  from hestia_earth.utils.model import linked_node
  from hestia_earth.utils.lookup import get_table_value, download_lookup, column_name

- from . import _term_id, _include_methodModel
+ from . import _term_id, _include_methodModel, flatten_args
  from .blank_node import find_terms_value
  from .constant import Units, get_atomic_conversion


+ EMISSION_METHOD_TIERS = [e.value for e in EmissionMethodTier]
+
+
  def _new_emission(term, model=None):
      node = {'@type': SchemaType.EMISSION.value}
      node['term'] = linked_node(term if isinstance(term, dict) else download_hestia(_term_id(term)))
@@ -32,3 +38,68 @@ def get_nh3_no3_nox_to_n(cycle: dict, nh3_term_id: str, no3_term_id: str, nox_te
      nox = None if nox is None else nox / get_atomic_conversion(Units.KG_NOX, Units.TO_N)

      return (nh3, no3, nox)
+
+
+ _EMISSION_METHOD_TIER_RANKING = [
+     EmissionMethodTier.MEASURED,
+     EmissionMethodTier.TIER_3,
+     EmissionMethodTier.TIER_2,
+     EmissionMethodTier.TIER_1,
+     EmissionMethodTier.BACKGROUND,
+     EmissionMethodTier.NOT_RELEVANT
+ ]
+ """
+ A ranking of `EmissionMethodTier`s from strongest to weakest.
+ """
+
+
+ _EmissionMethodTiers = Union[EmissionMethodTier, str, Iterable[Union[EmissionMethodTier, str]]]
+ """
+ A type alias for a single emission method tier (as an `EmissionMethodTier` enum or string) or multiple emission
+ method tiers (as an iterable of `EmissionMethodTier` enums or strings).
+ """
+
+
+ def min_emission_method_tier(*methods: _EmissionMethodTiers) -> EmissionMethodTier:
+     """
+     Get the minimum ranking emission method tier from the provided methods.
+
+     n.b., the `max` function is used as weaker methods have higher indices.
+
+     Parameters
+     ----------
+     *methods : EmissionMethodTier | str | Iterable[EmissionMethodTier] | Iterable[str]
+         Emission method tiers or iterables of emission method tiers.
+
+     Returns
+     -------
+     EmissionMethodTier
+         The emission method tier with the minimum ranking.
+     """
+     methods_ = [to_emission_method_tier(arg) for arg in flatten_args(methods)]
+     return max(
+         methods_,
+         key=lambda method: _EMISSION_METHOD_TIER_RANKING.index(method),
+         default=_EMISSION_METHOD_TIER_RANKING[-1]
+     )
+
+
+ def to_emission_method_tier(method: Union[EmissionMethodTier, str]) -> Optional[EmissionMethodTier]:
+     """
+     Convert the input to an `EmissionMethodTier` if possible.
+
+     Parameters
+     ----------
+     method : EmissionMethodTier | str
+         The emission method tier as either a `str` or `EmissionMethodTier`.
+
+     Returns
+     -------
+     EmissionMethodTier | None
+         The matching `EmissionMethodTier`, or `None` if the string is invalid.
+     """
+     return (
+         method if isinstance(method, EmissionMethodTier)
+         else EmissionMethodTier(method) if method in EMISSION_METHOD_TIERS
+         else None
+     )
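
The helpers added to `emission.py` rank method tiers so a model can report the weakest tier that fed a derived emission. A brief sketch of how they might be called; the argument combinations are illustrative, and it assumes `flatten_args` flattens nested iterables of arguments as its use above implies.

```python
from hestia_earth.schema import EmissionMethodTier

from hestia_earth.models.utils.emission import min_emission_method_tier, to_emission_method_tier

# An enum passes through unchanged; an unrecognised string maps to None.
to_emission_method_tier(EmissionMethodTier.TIER_1)  # -> EmissionMethodTier.TIER_1
to_emission_method_tier("not a real tier")          # -> None

# The weakest tier among the provided methods is returned, e.g. when an emission is
# derived from inputs calculated at different tiers.
weakest = min_emission_method_tier(
    EmissionMethodTier.MEASURED,
    [EmissionMethodTier.TIER_3, EmissionMethodTier.TIER_1]
)
assert weakest == EmissionMethodTier.TIER_1

# With no methods at all, the bottom of the ranking is returned as the default.
assert min_emission_method_tier() == EmissionMethodTier.NOT_RELEVANT
```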
hestia_earth/models/utils/inorganicFertiliser.py
@@ -31,14 +31,14 @@ def get_NH3_emission_factor(term_id: str, soilPh: float, temperature: float):
      soilPh_key = _get_soilPh_lookup_key(soilPh)
      temperature_key = _get_temperature_lookup_key(temperature)
      data = get_term_lookup(term_id, f"NH3_emissions_factor_{soilPh_key}")
-     return safe_parse_float(extract_grouped_data(data, temperature_key), 1)
+     return safe_parse_float(extract_grouped_data(data, temperature_key), None)


  def get_country_breakdown(model: str, term_id: str, country_id: str, col_name: str):
      lookup = download_lookup(BREAKDOWN_LOOKUP)
      value = get_table_value(lookup, 'termid', country_id, column_name(col_name))
      debugMissingLookup(BREAKDOWN_LOOKUP, 'termid', country_id, col_name, value, model=model, term=term_id)
-     return safe_parse_float(value, 1)
+     return safe_parse_float(value, None)


  def get_cycle_inputs(cycle: dict):
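
The only behavioural change here is the fallback passed to `safe_parse_float`: a missing or unparsable lookup entry now yields `None` rather than `1`. A self-contained sketch of why that matters, using `_safe_parse_float` as a stand-in for `hestia_earth.utils.tools.safe_parse_float` (the stand-in's behaviour is an assumption, not the library's exact implementation):

```python
from typing import Optional


def _safe_parse_float(value, default: Optional[float] = None) -> Optional[float]:
    """Stand-in for safe_parse_float: parse a float or fall back to the default."""
    try:
        return float(value)
    except (TypeError, ValueError):
        return default


print(_safe_parse_float("0.24", None))  # 0.24 -> a real emission factor from the lookup
print(_safe_parse_float(None, 1))       # 1    -> old default: missing data looked like a factor of 1
print(_safe_parse_float(None, None))    # None -> new default: callers can detect the missing lookup
```

Returning `None` lets downstream code skip or flag the calculation instead of silently applying a factor of 1.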
hestia_earth/models/utils/measurement.py
@@ -1,18 +1,19 @@
+ from collections import defaultdict
+ from collections.abc import Iterable
  from functools import reduce
  from dateutil import parser
  from statistics import mode, mean
- from typing import Any, Union
+ from typing import Any, Optional, Union

- from hestia_earth.schema import SchemaType
+ from hestia_earth.schema import MeasurementMethodClassification, SchemaType
  from hestia_earth.utils.api import download_hestia
  from hestia_earth.utils.model import linked_node
  from hestia_earth.utils.tools import non_empty_list, flatten, safe_parse_float
  from hestia_earth.utils.date import diff_in_days

- from . import _term_id, _include_method
+ from . import _term_id, _include_method, flatten_args
  from .term import get_lookup_value

- # from hestia_earth.models.site.utils import _has_all_months

  # TODO: verify those values
  MAX_DEPTH = 1000
@@ -24,6 +25,8 @@ MEASUREMENT_REDUCE = {
      'sum': lambda value: sum(value)
  }

+ MEASUREMENT_METHOD_CLASSIFICATIONS = [e.value for e in MeasurementMethodClassification]
+

  def _new_measurement(term, model=None):
      node = {'@type': SchemaType.MEASUREMENT.value}
@@ -207,3 +210,114 @@ def most_relevant_measurement_value_by_depth_and_date(
      nearest_value, nearest_date = min(zip(values, dates), key=lambda i: date_distance(i[1]), default=(default, None))

      return nearest_value, nearest_date
+
+
+ _MEASUREMENT_METHOD_CLASSIFICATION_RANKING = [
+     MeasurementMethodClassification.ON_SITE_PHYSICAL_MEASUREMENT,
+     MeasurementMethodClassification.MODELLED_USING_OTHER_MEASUREMENTS,
+     MeasurementMethodClassification.TIER_3_MODEL,
+     MeasurementMethodClassification.TIER_2_MODEL,
+     MeasurementMethodClassification.TIER_1_MODEL,
+     MeasurementMethodClassification.PHYSICAL_MEASUREMENT_ON_NEARBY_SITE,
+     MeasurementMethodClassification.GEOSPATIAL_DATASET,
+     MeasurementMethodClassification.REGIONAL_STATISTICAL_DATA,
+     MeasurementMethodClassification.COUNTRY_LEVEL_STATISTICAL_DATA,
+     MeasurementMethodClassification.EXPERT_OPINION,
+     MeasurementMethodClassification.UNSOURCED_ASSUMPTION
+ ]
+ """
+ A ranking of `MeasurementMethodClassification`s from strongest to weakest.
+ """
+
+ _MeasurementMethodClassifications = Union[
+     MeasurementMethodClassification, str, Iterable[Union[MeasurementMethodClassification, str]]
+ ]
+ """
+ A type alias for a single measurement method classification (as a `MeasurementMethodClassification` enum or string)
+ or multiple measurement method classifications (as an iterable of `MeasurementMethodClassification` enums or
+ strings).
+ """
+
+
+ def min_measurement_method_classification(
+     *methods: _MeasurementMethodClassifications
+ ) -> MeasurementMethodClassification:
+     """
+     Get the minimum ranking measurement method from the provided methods.
+
+     n.b., the `max` function is used as weaker methods have higher indices.
+
+     Parameters
+     ----------
+     *methods : MeasurementMethodClassification | str | Iterable[MeasurementMethodClassification] | Iterable[str]
+         Measurement method classifications or iterables of measurement method classifications.
+
+     Returns
+     -------
+     MeasurementMethodClassification
+         The measurement method classification with the minimum ranking.
+     """
+     methods_ = [to_measurement_method_classification(arg) for arg in flatten_args(methods)]
+     return max(
+         methods_,
+         key=lambda method: _MEASUREMENT_METHOD_CLASSIFICATION_RANKING.index(method),
+         default=_MEASUREMENT_METHOD_CLASSIFICATION_RANKING[-1]
+     )
+
+
+ def to_measurement_method_classification(
+     method: Union[MeasurementMethodClassification, str]
+ ) -> Optional[MeasurementMethodClassification]:
+     """
+     Convert the input to a `MeasurementMethodClassification` if possible.
+
+     Parameters
+     ----------
+     method : MeasurementMethodClassification | str
+         The measurement method as either a `str` or `MeasurementMethodClassification`.
+
+     Returns
+     -------
+     MeasurementMethodClassification | None
+         The matching `MeasurementMethodClassification`, or `None` if the string is invalid.
+     """
+     return (
+         method if isinstance(method, MeasurementMethodClassification)
+         else MeasurementMethodClassification(method) if method in MEASUREMENT_METHOD_CLASSIFICATIONS
+         else None
+     )
+
+
+ def group_measurements_by_method_classification(
+     nodes: list[dict]
+ ) -> dict[MeasurementMethodClassification, list[dict]]:
+     """
+     Group [Measurement](https://www.hestia.earth/schema/Measurement) nodes by their method classification.
+
+     The returned dict has the shape:
+     ```
+     {
+         method (MeasurementMethodClassification): nodes (list[dict]),
+         ...methods
+     }
+     ```
+
+     Parameters
+     ----------
+     nodes : list[dict]
+         A list of Measurement nodes.
+
+     Returns
+     -------
+     dict[MeasurementMethodClassification, list[dict]]
+         The measurement nodes grouped by method classification.
+     """
+     valid_nodes = (node for node in nodes if node.get("@type") == SchemaType.MEASUREMENT.value)
+
+     def group_node(groups: dict, node: dict) -> list[dict]:
+         measurement_method_classification = MeasurementMethodClassification(node.get("methodClassification"))
+         groups[measurement_method_classification].append(node)
+         return groups
+
+     grouped_nodes = reduce(group_node, valid_nodes, defaultdict(list))
+     return dict(grouped_nodes)
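
The measurement utilities mirror the emission-tier helpers: a strongest-to-weakest ranking of `MeasurementMethodClassification`s plus grouping of Measurement nodes by method. A usage sketch with hypothetical minimal nodes; the field values are illustrative, and `flatten_args` is again assumed to pass single enum arguments through unchanged.

```python
from hestia_earth.schema import MeasurementMethodClassification

from hestia_earth.models.utils.measurement import (
    group_measurements_by_method_classification,
    min_measurement_method_classification
)

# Hypothetical, stripped-down Measurement nodes; real nodes carry more fields.
nodes = [
    {
        "@type": "Measurement",
        "term": {"@id": "organicCarbonPerHa"},
        "methodClassification": MeasurementMethodClassification.GEOSPATIAL_DATASET.value
    },
    {
        "@type": "Measurement",
        "term": {"@id": "soilPh"},
        "methodClassification": MeasurementMethodClassification.ON_SITE_PHYSICAL_MEASUREMENT.value
    }
]

grouped = group_measurements_by_method_classification(nodes)
# {GEOSPATIAL_DATASET: [organicCarbonPerHa node], ON_SITE_PHYSICAL_MEASUREMENT: [soilPh node]}

# The weakest classification among the inputs, e.g. for tagging a value modelled from both.
weakest = min_measurement_method_classification(*grouped.keys())
assert weakest == MeasurementMethodClassification.GEOSPATIAL_DATASET
```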
hestia_earth/models/version.py
@@ -1 +1 @@
- VERSION = '0.61.7'
+ VERSION = '0.61.8'
hestia_earth_models-0.61.8.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: hestia-earth-models
- Version: 0.61.7
+ Version: 0.61.8
  Summary: Hestia's set of modules for filling gaps in the activity data using external datasets (e.g. populating soil properties with a geospatial dataset using provided coordinates) and internal lookups (e.g. populating machinery use from fuel use). Includes rules for when gaps should be filled versus not (e.g. never gap fill yield, gap fill crop residue if yield provided etc.).
  Home-page: https://gitlab.com/hestia-earth/hestia-engine-models
  Author: Hestia Team