hestia-earth-models 0.64.4__py3-none-any.whl → 0.64.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of hestia-earth-models has been flagged as potentially problematic.
- hestia_earth/models/blonkConsultants2016/ch4ToAirNaturalVegetationBurning.py +5 -9
- hestia_earth/models/blonkConsultants2016/co2ToAirAboveGroundBiomassStockChangeLandUseChange.py +5 -9
- hestia_earth/models/blonkConsultants2016/n2OToAirNaturalVegetationBurningDirect.py +6 -13
- hestia_earth/models/cycle/animal/input/properties.py +6 -0
- hestia_earth/models/cycle/completeness/soilAmendment.py +3 -2
- hestia_earth/models/cycle/concentrateFeed.py +10 -4
- hestia_earth/models/cycle/input/properties.py +6 -0
- hestia_earth/models/cycle/liveAnimal.py +2 -2
- hestia_earth/models/cycle/milkYield.py +3 -3
- hestia_earth/models/cycle/otherSitesArea.py +59 -0
- hestia_earth/models/cycle/otherSitesUnusedDuration.py +9 -8
- hestia_earth/models/cycle/pastureSystem.py +3 -2
- hestia_earth/models/cycle/product/properties.py +6 -0
- hestia_earth/models/cycle/siteArea.py +83 -0
- hestia_earth/models/cycle/stockingDensityAnimalHousingAverage.py +28 -16
- hestia_earth/models/cycle/utils.py +1 -1
- hestia_earth/models/environmentalFootprintV3/soilQualityIndexLandOccupation.py +128 -0
- hestia_earth/models/environmentalFootprintV3/utils.py +17 -0
- hestia_earth/models/ipcc2006/co2ToAirOrganicSoilCultivation.py +17 -6
- hestia_earth/models/ipcc2006/n2OToAirOrganicSoilCultivationDirect.py +17 -6
- hestia_earth/models/ipcc2019/co2ToAirCarbonStockChange_utils.py +904 -0
- hestia_earth/models/ipcc2019/co2ToAirSoilOrganicCarbonStockChangeManagementChange.py +70 -618
- hestia_earth/models/mocking/search-results.json +395 -323
- hestia_earth/models/pooreNemecek2018/saplings.py +10 -7
- hestia_earth/models/site/management.py +18 -14
- hestia_earth/models/utils/__init__.py +38 -0
- hestia_earth/models/utils/array_builders.py +63 -52
- hestia_earth/models/utils/blank_node.py +137 -82
- hestia_earth/models/utils/descriptive_stats.py +3 -239
- hestia_earth/models/utils/feedipedia.py +15 -2
- hestia_earth/models/utils/landCover.py +9 -0
- hestia_earth/models/utils/lookup.py +13 -2
- hestia_earth/models/utils/measurement.py +3 -28
- hestia_earth/models/utils/stats.py +429 -0
- hestia_earth/models/utils/term.py +15 -3
- hestia_earth/models/utils/time_series.py +90 -0
- hestia_earth/models/version.py +1 -1
- {hestia_earth_models-0.64.4.dist-info → hestia_earth_models-0.64.5.dist-info}/METADATA +1 -1
- {hestia_earth_models-0.64.4.dist-info → hestia_earth_models-0.64.5.dist-info}/RECORD +62 -48
- tests/models/blonkConsultants2016/test_ch4ToAirNaturalVegetationBurning.py +2 -2
- tests/models/blonkConsultants2016/test_co2ToAirAboveGroundBiomassStockChangeLandUseChange.py +2 -2
- tests/models/blonkConsultants2016/test_n2OToAirNaturalVegetationBurningDirect.py +2 -2
- tests/models/cycle/completeness/test_soilAmendment.py +1 -1
- tests/models/cycle/test_liveAnimal.py +1 -1
- tests/models/cycle/test_milkYield.py +1 -1
- tests/models/cycle/test_otherSitesArea.py +68 -0
- tests/models/cycle/test_siteArea.py +51 -0
- tests/models/cycle/test_stockingDensityAnimalHousingAverage.py +2 -2
- tests/models/environmentalFootprintV3/test_soilQualityIndexLandOccupation.py +136 -0
- tests/models/ipcc2019/test_co2ToAirCarbonStockChange_utils.py +50 -0
- tests/models/ipcc2019/test_co2ToAirSoilOrganicCarbonStockChangeManagementChange.py +1 -39
- tests/models/pooreNemecek2018/test_saplings.py +1 -1
- tests/models/site/test_management.py +3 -153
- tests/models/utils/test_array_builders.py +67 -6
- tests/models/utils/test_blank_node.py +191 -7
- tests/models/utils/test_descriptive_stats.py +2 -86
- tests/models/utils/test_measurement.py +1 -22
- tests/models/utils/test_stats.py +186 -0
- tests/models/utils/test_time_series.py +88 -0
- {hestia_earth_models-0.64.4.dist-info → hestia_earth_models-0.64.5.dist-info}/LICENSE +0 -0
- {hestia_earth_models-0.64.4.dist-info → hestia_earth_models-0.64.5.dist-info}/WHEEL +0 -0
- {hestia_earth_models-0.64.4.dist-info → hestia_earth_models-0.64.5.dist-info}/top_level.txt +0 -0
hestia_earth/models/utils/descriptive_stats.py

@@ -1,245 +1,10 @@
 from collections.abc import Iterable
 from enum import Enum
-from
-from numpy import abs, around, exp, float64, inf, pi, sign, sqrt
+from numpy import around
 from numpy.typing import NDArray
 from typing import Optional, Union
 
 
-def calc_z_critical(
-    confidence_interval: float,
-    n_sided: int = 2
-) -> float64:
-    """
-    Calculate the z-critical value from the confidence interval.
-
-    Parameters
-    ----------
-    confidence_interval : float
-        The confidence interval as a percentage between 0 and 100%.
-    n_sided : int, optional
-        The number of tails (default value = `2`).
-
-    Returns
-    -------
-    float64
-        The z-critical value as a floating point between 0 and infinity.
-    """
-    alpha = 1 - confidence_interval / 100
-    return _normal_ppf(1 - alpha / n_sided)
-
-
-def _normal_ppf(q: float64, tol: float64 = 1e-10) -> float64:
-    """
-    Calculates the percent point function (PPF), also known as the inverse cumulative distribution function (CDF), of a
-    standard normal distribution using the Newton-Raphson method.
-
-    Parameters
-    ----------
-    q : float64
-        The quantile at which to evaluate the PPF.
-    tol : float64, optional
-        The tolerance for the Newton-Raphson method. Defaults to 1e-10.
-
-    Returns
-    -------
-    float64
-        The PPF value at the given quantile.
-    """
-    INITIAL_GUESS = 0
-    MAX_ITER = 100
-
-    def step(x):
-        """Perform one step of the Newton-Raphson method."""
-        x_new = x - (_normal_cdf(x) - q) / _normal_pdf(x)
-        return x_new if abs(x_new - x) >= tol else x
-
-    return (
-        inf if q == 1 else
-        -inf if q == 0 else
-        reduce(lambda x, _: step(x), range(MAX_ITER), INITIAL_GUESS)
-    )
-
-
-def _normal_cdf(x: float64) -> float64:
-    """
-    Calculates the cumulative distribution function (CDF) of a standard normal distribution for a single value using a
-    custom error function (erf).
-
-    Parameters
-    ----------
-    x : float64
-        The point at which to evaluate the CDF.
-
-    Returns
-    -------
-    float64
-        The CDF value at the given point.
-    """
-    return 0.5 * (1 + _erf(x / sqrt(2)))
-
-
-def _erf(x: float64) -> float64:
-    """
-    Approximates the error function of a standard normal distribution using a numerical approximation based on
-    Abramowitz and Stegun formula 7.1.26.
-
-    Parameters
-    ----------
-    x : float64
-        The input value.
-
-    Returns
-    -------
-    float64
-        The approximated value of the error function.
-    """
-    # constants
-    A_1 = 0.254829592
-    A_2 = -0.284496736
-    A_3 = 1.421413741
-    A_4 = -1.453152027
-    A_5 = 1.061405429
-    P = 0.3275911
-
-    # Save the sign of x
-    sign_ = sign(x)
-    x_ = abs(x)
-
-    # A&S formula 7.1.26
-    t = 1.0 / (1.0 + P * x_)
-    y = 1.0 - (((((A_5 * t + A_4) * t) + A_3) * t + A_2) * t + A_1) * t * exp(-x_ * x_)
-
-    return sign_ * y
-
-
-def _normal_pdf(x: float64) -> float64:
-    """
-    Calculates the probability density function (PDF) of a standard normal distribution for a single value.
-
-    Parameters
-    ----------
-    x : float64
-        The point at which to evaluate the PDF.
-
-    Returns
-    -------
-    float64
-        The PDF value at the given point.
-    """
-    return 1 / sqrt(2 * pi) * exp(-0.5 * x**2)
-
-
-def _calc_confidence_level(
-    z_critical: float64,
-    n_sided: int = 2
-) -> float64:
-    """
-    Calculate the confidence interval from the z-critical value.
-
-    Parameters
-    ----------
-    z_critical_value : np.float64
-        The confidence interval as a floating point number between 0 and infinity.
-    n_sided : int, optional
-        The number of tails (default value = `2`).
-
-    Returns
-    -------
-    np.float64
-        The confidence interval as a percentage between 0 and 100%.
-    """
-    alpha = (1 - _normal_cdf(z_critical)) * n_sided
-    return (1 - alpha) * 100
-
-
-def calc_required_iterations_monte_carlo(
-    confidence_level: float,
-    precision: float,
-    sd: float
-) -> int:
-    """
-    Calculate the number of iterations required for a Monte Carlo simulation to have a desired precision, subject to a
-    given confidence level.
-
-    Parameters
-    ----------
-    confidence_level : float
-        The confidence level, as a percentage out of 100, that the precision should be subject too (i.e., we are x%
-        sure that the sample mean deviates from the true populatation mean by less than the desired precision).
-    precision : float
-        The desired precision as a floating point value (i.e., if the Monte Carlo simulation will be used to estimate
-        `organicCarbonPerHa` to a precision of 100 kg C ha-1 this value should be 100).
-    sd : float
-        The standard deviation of the sample. This can be estimated by running the model 500 times (a number that does
-        not take too much time to run but is large enough for the sample standard deviation to converge reasonably
-        well).
-
-    Returns
-    -------
-    int
-        The required number of iterations.
-    """
-    z_critical_value = calc_z_critical(confidence_level)
-    return round(((sd * z_critical_value) / precision) ** 2)
-
-
-def calc_confidence_level_monte_carlo(
-    n_iterations: int,
-    precision: float,
-    sd: float
-) -> float:
-    """
-    Calculate the confidence level that the sample mean calculated by the Monte Carlo simulation deviates from the
-    true population mean by less than the desired precision.
-
-    Parameters
-    ----------
-    n_iterations : int
-        The number of iterations that the Monte Carlo simulation was run for.
-    precision : float
-        The desired precision as a floating point value (i.e., if the Monte Carlo simulation will be used to estimate
-        `organicCarbonPerHa` to a precision of 100 kg C ha-1 this value should be 100).
-    sd : float
-        The standard deviation of the sample.
-
-    Returns
-    -------
-    float
-        The confidence level, as a percentage out of 100, that the precision should be subject too (i.e., we are x%
-        sure that the sample mean deviates from the true populatation mean by less than the desired precision).
-    """
-    return _calc_confidence_level(precision*sqrt(n_iterations)/sd)
-
-
-def calc_precision_monte_carlo(
-    confidence_level: float,
-    n_iterations: int,
-    sd: float
-) -> float:
-    """
-    Calculate the +/- precision of a Monte Carlo simulation for a desired confidence level.
-
-    Parameters
-    ----------
-    confidence_level : float
-        The confidence level, as a percentage out of 100, that the precision should be subject too (i.e., we are x%
-        sure that the sample mean deviates from the true populatation mean by less than the desired precision).
-    n_iterations : int
-        The number of iterations that the Monte Carlo simulation was run for.
-    sd : float
-        The standard deviation of the sample.
-
-    Returns
-    -------
-    float
-        The precision of the sample mean estimated by the Monte Carlo model as a floating point value with the same
-        units as the estimated mean.
-    """
-    z_critical = calc_z_critical(confidence_level)
-    return (sd*z_critical)/sqrt(n_iterations)
-
-
 def calc_descriptive_stats(
     arr: NDArray,
     stats_definition: Union[Enum, str],

@@ -268,10 +33,9 @@ def calc_descriptive_stats(
     min_ = around(arr.min(axis=axis), decimals)
     max_ = around(arr.max(axis=axis), decimals)
 
-    rows, columns = arr.shape
     observations = (
-        [
-        else [
+        [arr.shape[0]] * arr.shape[1] if axis == 0
+        else [arr.shape[1]] * arr.shape[0] if axis == 1
         else [arr.size]
     )
 
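The statistics helpers removed here (z-critical value, normal PPF/CDF/PDF approximations, and the Monte Carlo iteration and precision calculations) appear to move into the new hestia_earth/models/utils/stats.py listed above. Below is a minimal standalone sketch of the relationship those helpers encode, using scipy.stats for the z-critical value instead of the package's own Newton-Raphson PPF and Abramowitz-Stegun erf approximation; the function names and example numbers are illustrative, not the package API.

```python
# Sketch only (not the package API): required Monte Carlo iterations and
# achieved precision, with scipy supplying the standard-normal quantile.
from scipy.stats import norm


def z_critical(confidence_level: float, n_sided: int = 2) -> float:
    """z value such that confidence_level % of a standard normal lies within +/- z (two-sided by default)."""
    alpha = 1 - confidence_level / 100
    return norm.ppf(1 - alpha / n_sided)


def required_iterations(confidence_level: float, precision: float, sd: float) -> int:
    """Iterations needed so the sample mean is within `precision` of the true mean at the given confidence."""
    return round((sd * z_critical(confidence_level) / precision) ** 2)


def achieved_precision(confidence_level: float, n_iterations: int, sd: float) -> float:
    """+/- precision of the Monte Carlo mean after `n_iterations` runs."""
    return sd * z_critical(confidence_level) / n_iterations ** 0.5


# e.g. estimating organicCarbonPerHa with a hypothetical sd of 1500 kg C ha-1
# to +/- 100 kg C ha-1 at 95% confidence:
print(required_iterations(95, 100, 1500))  # ~864 iterations
```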
hestia_earth/models/utils/feedipedia.py

@@ -4,6 +4,7 @@ from hestia_earth.utils.tools import non_empty_list, safe_parse_float
 
 from hestia_earth.models.log import logShouldRun
 from .property import _new_property
+from .blank_node import merge_blank_nodes
 
 DRY_MATTER_TERM_ID = 'dryMatter'
 

@@ -17,6 +18,12 @@ def get_feedipedia_properties():
     return term_ids
 
 
+def _should_rescale_by_dm(property_id: str):
+    lookup = download_lookup('property.csv')
+    value = get_table_value(lookup, 'termid', property_id, column_name('feedipediaConversionEnum'))
+    return 'dm' in value
+
+
 def _dm_property(term_id: str, property_values: dict, dm_property_values: dict, dry_matter_property: dict):
     blank_node = _new_property(term_id)
     blank_node_data = {}

@@ -25,7 +32,11 @@ def _dm_property(term_id: str, property_values: dict, dm_property_values: dict,
         old_dm_value = safe_parse_float(dm_property_values.get(property_key))
         old_property_value = safe_parse_float(property_values.get(property_key))
         if all([new_dm_value, old_dm_value, old_property_value]):
-
+            new_value = round(
+                old_property_value / old_dm_value * new_dm_value,
+                2
+            ) if _should_rescale_by_dm(term_id) else old_property_value
+            blank_node_data[property_key] = new_value
     return (blank_node | blank_node_data) if blank_node_data else None
 
 

@@ -68,6 +79,8 @@ def rescale_properties_from_dryMatter(model: str, node: dict, blank_nodes: list)
         ])
         for prop in new_properties:
             logShouldRun(node, model, term_id, True, property=prop.get('term', {}).get('@id'))
-        return
+        return (
+            blank_node | {'properties': merge_blank_nodes(all_properties, new_properties)}
+        ) if new_properties else blank_node
 
     return list(map(exec, blank_nodes))
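The new _should_rescale_by_dm check means a Feedipedia-derived property is only rescaled from the reference dry-matter content to the node's own dry-matter content when its feedipediaConversionEnum lookup value contains `dm`; otherwise the original value is kept. A rough standalone illustration of that rule follows; the lookup is stubbed with a plain dict and the enum values shown are hypothetical, not taken from the real property.csv lookup.

```python
# Illustrative only: the real model reads feedipediaConversionEnum from the
# property.csv lookup; here it is stubbed with hypothetical values.
FEEDIPEDIA_CONVERSION = {
    'crudeProteinContent': 'dm',                # assumed: expressed per kg dry matter, so rescale
    'energyContentHigherHeatingValue': 'none',  # assumed: not rescaled by dry matter
}


def rescale_property(property_id: str, old_value: float, old_dm: float, new_dm: float) -> float:
    """Rescale a property from the reference dry-matter content to the node's dry-matter content."""
    should_rescale = 'dm' in FEEDIPEDIA_CONVERSION.get(property_id, '')
    return round(old_value / old_dm * new_dm, 2) if should_rescale else old_value


# e.g. a crude protein value of 9.5 quoted at 88% dry matter, for an input at 35% dry matter:
print(rescale_property('crudeProteinContent', 9.5, 88, 35))  # 3.78
```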
hestia_earth/models/utils/landCover.py (new file)

@@ -0,0 +1,9 @@
+from hestia_earth.schema import TermTermType
+
+from .term import get_lookup_value
+
+
+def get_pef_grouping(term_id: str):
+    term = {'@id': term_id, 'termType': TermTermType.LANDCOVER.value}
+    grouping = get_lookup_value(term, column='pefTermGrouping')
+    return f"{grouping}" if grouping else None
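This helper maps a landCover term to its PEF (Product Environmental Footprint) term grouping via the pefTermGrouping lookup column, presumably so the new environmentalFootprintV3 soilQualityIndexLandOccupation model can match characterisation factors by grouping. A stubbed sketch of the same lookup path; the grouping strings below are placeholders, not values from the actual glossary lookup.

```python
# Stubbed sketch of the pefTermGrouping lookup path; the mapping values are
# invented for illustration only.
from typing import Optional

PEF_TERM_GROUPING = {
    'permanentPasture': 'pasture/meadow',  # hypothetical mapping
    'forest': 'forest, intensive',         # hypothetical mapping
}


def get_pef_grouping(term_id: str) -> Optional[str]:
    grouping = PEF_TERM_GROUPING.get(term_id)
    return grouping if grouping else None


print(get_pef_grouping('permanentPasture'))  # pasture/meadow
print(get_pef_grouping('otherTerm'))         # None
```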
hestia_earth/models/utils/lookup.py

@@ -1,5 +1,5 @@
-from typing import Optional
-
+from typing import Optional, List
+from numpy import recarray
 from hestia_earth.schema import SchemaType
 from hestia_earth.utils.lookup import (
     download_lookup, get_table_value, column_name, extract_grouped_data, _get_single_table_value

@@ -155,3 +155,14 @@ def is_input_id_allowed(data: dict, term: dict):
         _ALLOW_ALL in allowed_values,
         len(values) == 0
     ]) else any([value in allowed_values for value in values])
+
+
+def fallback_country(country_id: str, lookup_arrays: List[recarray]) -> str:
+    """
+    Given a site dict with 'country_id' location term, and lookup table,
+    checks if a location can be used in lookup file
+    else fallback to the default "region-world"
+    """
+    is_in_lookup = lambda v: all(v in array['termid'] for array in lookup_arrays)  # noqa: E731
+    fallback_id = 'region-world'
+    return country_id if is_in_lookup(country_id) else fallback_id if is_in_lookup(fallback_id) else None
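fallback_country returns the given country term id only if it is present in every supplied lookup recarray, otherwise it falls back to the global region-world id, and returns None if even that is missing. A self-contained sketch of the same decision with a tiny stand-in lookup array (the term ids and column layout are examples, not a real glossary lookup):

```python
# Self-contained illustration of the fallback logic; real callers pass
# recarrays loaded from the glossary lookup CSVs.
import numpy as np


def fallback_country(country_id, lookup_arrays):
    def is_in_lookup(v):
        # the id must appear in the 'termid' column of every lookup array
        return all(v in array['termid'] for array in lookup_arrays)
    fallback_id = 'region-world'
    return country_id if is_in_lookup(country_id) else fallback_id if is_in_lookup(fallback_id) else None


lookup = np.rec.fromrecords(
    [('GADM-FRA', 1.0), ('region-world', 1.0)],  # example term ids and a dummy factor column
    names=['termid', 'factor']
)

print(fallback_country('GADM-FRA', [lookup]))  # GADM-FRA
print(fallback_country('GADM-XYZ', [lookup]))  # region-world
```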
hestia_earth/models/utils/measurement.py

@@ -1,9 +1,8 @@
 from collections import defaultdict
 from collections.abc import Iterable
 from functools import reduce
-from dateutil import parser
 from statistics import mode, mean
-from typing import Any, Optional,
+from typing import Any, Optional, Union
 
 from hestia_earth.schema import MeasurementMethodClassification, SchemaType
 from hestia_earth.utils.api import download_hestia

@@ -11,12 +10,12 @@ from hestia_earth.utils.model import linked_node
 from hestia_earth.utils.tools import non_empty_list, flatten, safe_parse_float
 from hestia_earth.utils.date import diff_in_days
 
+from hestia_earth.models.utils.blank_node import most_relevant_blank_node_by_id
 from . import _term_id, _include_method, flatten_args
 from .term import get_lookup_value
 
 
 # TODO: verify those values
-MAX_DEPTH = 1000
 OLDEST_DATE = '1800'
 SOIL_TEXTURE_IDS = ['sandContent', 'siltContent', 'clayContent']
 MEASUREMENT_REDUCE = {

@@ -42,32 +41,8 @@ def measurement_value(measurement: dict, is_larger_unit: bool = False) -> float:
     return MEASUREMENT_REDUCE.get(reducer, lambda v: v[0])(value) if is_value_valid else 0
 
 
-def _measurement_date(measurement: dict): return parser.isoparse(measurement.get('endDate', OLDEST_DATE))
-
-
-def _distance(measurement: dict, date): return abs((_measurement_date(measurement) - date).days)
-
-
-def _most_recent_measurements(measurements: list, date: str) -> list:
-    closest_date = parser.isoparse(date)
-    min_distance = min([_distance(m, closest_date) for m in measurements])
-    return list(filter(lambda m: _distance(m, closest_date) == min_distance, measurements))
-
-
-def _shallowest_measurement(measurements: list) -> dict:
-    min_depth = min([m.get('depthUpper', MAX_DEPTH) for m in measurements])
-    return next((m for m in measurements if m.get('depthUpper', MAX_DEPTH) == min_depth), {})
-
-
-def most_relevant_measurement(measurements: list, term_id: str, date: str):
-    filtered_measurements = [m for m in measurements if m.get('term', {}).get('@id') == term_id]
-    return {} if len(filtered_measurements) == 0 \
-        else _shallowest_measurement(_most_recent_measurements(filtered_measurements, date)) \
-        if date and len(filtered_measurements) > 1 else filtered_measurements[0]
-
-
 def most_relevant_measurement_value(measurements: list, term_id: str, date: str, default=None):
-    measurement =
+    measurement = most_relevant_blank_node_by_id(measurements, term_id, date)
     return measurement_value(measurement) if measurement else default
 
 
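The measurement-selection helpers (most recent by endDate, then shallowest by depthUpper) are removed here in favour of the generalised most_relevant_blank_node_by_id in hestia_earth/models/utils/blank_node.py. The sketch below is a condensed restatement of the selection rule the removed code implemented, kept only to make the behaviour explicit; it is not the new blank_node.py implementation.

```python
# Condensed restatement of the removed logic: filter by term id, keep the
# measurements whose endDate is closest to the target date, then take the
# shallowest (smallest depthUpper). Not the blank_node.py API.
from dateutil import parser

MAX_DEPTH = 1000
OLDEST_DATE = '1800'


def most_relevant_measurement(measurements: list, term_id: str, date: str) -> dict:
    candidates = [m for m in measurements if m.get('term', {}).get('@id') == term_id]
    if not candidates:
        return {}
    if not date or len(candidates) == 1:
        return candidates[0]

    target = parser.isoparse(date)

    def distance(m: dict) -> int:
        return abs((parser.isoparse(m.get('endDate', OLDEST_DATE)) - target).days)

    closest = min(distance(m) for m in candidates)
    most_recent = [m for m in candidates if distance(m) == closest]
    return min(most_recent, key=lambda m: m.get('depthUpper', MAX_DEPTH))


# e.g. picking between two soilPh measurements taken at different depths and dates:
# most_relevant_measurement(measurements, 'soilPh', '2020-01-01')
```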