hestia-earth-models 0.67.0__py3-none-any.whl → 0.67.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hestia_earth/models/cml2001Baseline/resourceUseEnergyDepletionDuringCycle.py +5 -10
- hestia_earth/models/cycle/completeness/freshForage.py +7 -3
- hestia_earth/models/cycle/inorganicFertiliser.py +67 -17
- hestia_earth/models/environmentalFootprintV3_1/environmentalFootprintSingleOverallScore.py +42 -37
- hestia_earth/models/environmentalFootprintV3_1/soilQualityIndexLandTransformation.py +22 -14
- hestia_earth/models/environmentalFootprintV3_1/soilQualityIndexTotalLandUseEffects.py +17 -15
- hestia_earth/models/hestia/landTransformation100YearAverageDuringCycle.py +1 -1
- hestia_earth/models/hestia/landTransformation20YearAverageDuringCycle.py +1 -1
- hestia_earth/models/ipcc2019/aboveGroundBiomass.py +1 -1
- hestia_earth/models/ipcc2019/belowGroundBiomass.py +1 -1
- hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_1_utils.py +4 -4
- hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_2_utils.py +1 -1
- hestia_earth/models/mocking/search-results.json +1173 -1041
- hestia_earth/models/utils/blank_node.py +64 -11
- hestia_earth/models/utils/ecoClimateZone.py +2 -2
- hestia_earth/models/utils/impact_assessment.py +5 -4
- hestia_earth/models/utils/lookup.py +3 -5
- hestia_earth/models/version.py +1 -1
- {hestia_earth_models-0.67.0.dist-info → hestia_earth_models-0.67.1.dist-info}/METADATA +1 -1
- {hestia_earth_models-0.67.0.dist-info → hestia_earth_models-0.67.1.dist-info}/RECORD +29 -29
- tests/models/cml2001Baseline/test_resourceUseEnergyDepletionDuringCycle.py +68 -35
- tests/models/environmentalFootprintV3_1/test_environmentalFootprintSingleOverallScore.py +38 -8
- tests/models/environmentalFootprintV3_1/test_soilQualityIndexLandTransformation.py +65 -36
- tests/models/site/test_management.py +1 -4
- tests/models/utils/test_blank_node.py +13 -165
- tests/orchestrator/models/test_transformations.py +4 -1
- {hestia_earth_models-0.67.0.dist-info → hestia_earth_models-0.67.1.dist-info}/LICENSE +0 -0
- {hestia_earth_models-0.67.0.dist-info → hestia_earth_models-0.67.1.dist-info}/WHEEL +0 -0
- {hestia_earth_models-0.67.0.dist-info → hestia_earth_models-0.67.1.dist-info}/top_level.txt +0 -0
hestia_earth/models/cml2001Baseline/resourceUseEnergyDepletionDuringCycle.py

@@ -1,6 +1,6 @@
 """
 This model converts all "energy" terms found in a `Cycle > Inputs` to `MJ` using optional
-`
+`energyContentLowerHeatingValue` and `density` properties or the term's "defaultProperties",
 aggregates them, and places them inside a 'resourceUseEnergyDepletionDuringCycle' indicator per aggregated input id.
 """
 from collections import defaultdict
@@ -12,7 +12,7 @@ from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import list_sum

 from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
-from hestia_earth.models.utils import Units
+from hestia_earth.models.utils import Units, _include
 from hestia_earth.models.utils.blank_node import convert_unit
 from hestia_earth.models.utils.indicator import _new_indicator
 from . import MODEL
@@ -31,7 +31,7 @@ REQUIREMENTS = {
             {
                 "@type": "Property",
                 "value": "",
-                "term.@id": "
+                "term.@id": "energyContentLowerHeatingValue",
                 "term.units": "MJ / kg"
             },
             {
@@ -57,7 +57,7 @@ RETURNS = {
 }

 LOOKUPS = {
-    "fuel": ["
+    "fuel": ["energyContentLowerHeatingValue", "density"]
 }

 TERM_ID = 'resourceUseEnergyDepletionDuringCycle'
@@ -123,12 +123,7 @@ def _should_run(cycle: dict) -> Tuple[bool, dict]:
         grouped_energy_terms[k].extend(list(v))

     logs = [
-        {
-            'id': input.get('@id'),
-            'units': input.get('units'),
-            'termType': input.get('termType'),
-            'value': input.get('value'),
-            'value-in-MJ': input.get('value-in-MJ'),
+        _include(input, ['id', 'units', 'termType', 'value', 'value-in-MJ']) | {
            'properties': " ".join([
                f"{p.get('term', {}).get('@id')}= {p.get('value')} ({p.get('term', {}).get('units')})"
                for p in input.get('properties', [])
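For readers skimming the diff, here is a minimal standalone sketch of the conversion the module docstring describes: multiply each fuel input's mass by its `energyContentLowerHeatingValue` property (MJ / kg) and aggregate per term id. It deliberately does not use the package's `convert_unit`/`Units` helpers, and the diesel figure is an illustrative value, not a HESTIA lookup.

```python
# Illustrative sketch only -- not the package's implementation.
from collections import defaultdict

def aggregate_energy_mj(inputs: list) -> dict:
    """Sum value (kg) * energyContentLowerHeatingValue (MJ/kg) per term id."""
    totals = defaultdict(float)
    for i in inputs:
        lhv = next((p.get("value") for p in i.get("properties", [])
                    if p.get("term", {}).get("@id") == "energyContentLowerHeatingValue"), None)
        if lhv is None:
            continue  # cannot convert without a lower heating value
        totals[i["term"]["@id"]] += sum(i.get("value", [])) * lhv  # kg * MJ/kg = MJ
    return dict(totals)

print(aggregate_energy_mj([{
    "term": {"@id": "diesel"},
    "value": [10],  # kg
    "properties": [{"term": {"@id": "energyContentLowerHeatingValue"}, "value": 42.6}],
}]))  # {'diesel': 426.0}
```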
hestia_earth/models/cycle/completeness/freshForage.py

@@ -4,7 +4,8 @@ Completeness Fresh Forage
 This model checks if we have the requirements below and updates the
 [Data Completeness](https://hestia.earth/schema/Completeness#cropResidue) value.
 """
-from hestia_earth.schema import SiteSiteType
+from hestia_earth.schema import SiteSiteType, TermTermType
+from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import list_sum

 from hestia_earth.models.log import logRequirements
@@ -46,18 +47,21 @@ ALLOWED_SITE_TYPES = [
 def _valid_input(input: dict): return is_from_model(input) and list_sum(input.get('value', [-1])) >= 0


+def _inputs(node: dict): return filter_list_term_type(node.get('inputs', []), TermTermType.FORAGE)
+
+
 def run(cycle: dict):
     site_type = cycle.get('site', {}).get('siteType')
     site_type_allowed = site_type in ALLOWED_SITE_TYPES

-    cycle_has_added_forage_input = any(map(_valid_input, cycle
+    cycle_has_added_forage_input = any(map(_valid_input, _inputs(cycle)))

     animals = [
         a for a in cycle.get('animals', [])
         if get_lookup_value(a.get('term', {}), 'isGrazingAnimal', model=MODEL, key=MODEL_KEY)
     ]
     all_animals_have_added_forage_input = bool(animals) and all([
-        any(map(_valid_input, animal
+        any(map(_valid_input, _inputs(animal))) for animal in animals
     ])

     logRequirements(cycle, model=MODEL, term=None, key=MODEL_KEY,
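To make the new completeness logic concrete, a rough standalone equivalent of `_inputs`/`_valid_input` is sketched below; the `termType == "forage"` test and the `added` flag are simplified stand-ins for `filter_list_term_type` and `is_from_model`, not the package's API.

```python
# Simplified stand-ins for the helpers added in this diff.
def forage_inputs(node: dict) -> list:
    # stand-in for filter_list_term_type(node.get('inputs', []), TermTermType.FORAGE)
    return [i for i in node.get("inputs", []) if i.get("term", {}).get("termType") == "forage"]

def valid_input(i: dict) -> bool:
    # stand-in for is_from_model(input) and list_sum(input.get('value', [-1])) >= 0
    return bool(i.get("added")) and sum(i.get("value", [-1])) >= 0

cycle = {"inputs": [
    {"term": {"termType": "forage"}, "value": [120], "added": True},
    {"term": {"termType": "fuel"}, "value": [5], "added": True},
]}
print(any(map(valid_input, forage_inputs(cycle))))  # True: one valid forage input
```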
hestia_earth/models/cycle/inorganicFertiliser.py

@@ -49,16 +49,28 @@ UNITS = [
 ]
 VALUE_BY_UNIT = {
     Units.KG_N.value: {
-        Units.KG_K2O.value: lambda
-
+        Units.KG_K2O.value: lambda data: (
+            data.get('value') / data.get('nitrogenContent-divide')
+        ) * data.get('potassiumContentAsK2O-multiply'),
+        Units.KG_P2O5.value: lambda data: (
+            data.get('value') / data.get('nitrogenContent-divide')
+        ) * data.get('phosphateContentAsP2O5-multiply')
     },
     Units.KG_K2O.value: {
-        Units.KG_N.value: lambda
-
+        Units.KG_N.value: lambda data: (
+            data.get('value') / data.get('potassiumContentAsK2O-divide')
+        ) * data.get('nitrogenContent-multiply'),
+        Units.KG_P2O5.value: lambda data: (
+            data.get('value') / data.get('potassiumContentAsK2O-divide')
+        ) * data.get('phosphateContentAsP2O5-multiply')
     },
     Units.KG_P2O5.value: {
-        Units.KG_N.value: lambda
-
+        Units.KG_N.value: lambda data: (
+            data.get('value') / data.get('phosphateContentAsP2O5-divide')
+        ) * data.get('nitrogenContent-multiply'),
+        Units.KG_K2O.value: lambda data: (
+            data.get('value') / data.get('phosphateContentAsP2O5-divide')
+        ) * data.get('potassiumContentAsK2O-multiply')
     }
 }

@@ -81,6 +93,7 @@ def _include_term_ids(term_id: str):

 def _run_input(cycle: dict, input: dict):
     term_id = input.get('term', {}).get('@id')
+    input_term_ids = _include_term_ids(term_id)
     nitrogenContent = safe_parse_float(get_term_lookup(term_id, 'nitrogenContent'), 0)
     nitrogenContent_min = safe_parse_float(get_term_lookup(term_id, 'nitrogenContent-min'), None)
     nitrogenContent_max = safe_parse_float(get_term_lookup(term_id, 'nitrogenContent-max'), None)
@@ -96,28 +109,64 @@ def _run_input(cycle: dict, input: dict):
     min_values = non_empty_list([nitrogenContent_min, phosphateContentAsP2O5_min, potassiumContentAsK2O_min])
     max_values = non_empty_list([nitrogenContent_max, phosphateContentAsP2O5_max, potassiumContentAsK2O_max])

-    def include_input(input_term_id):
+    def include_input(input_term_id: str):
         to_units = Units.KG_N.value if input_term_id.endswith('KgN') else (
             Units.KG_K2O.value if input_term_id.endswith('KgK2O') else Units.KG_P2O5.value
         )

         debugValues(cycle, model=MODEL_LOG, term=input_term_id,
+                    from_input_id=term_id,
                     from_units=from_units,
                     to_units=to_units,
-                    input_value=input_value
-
-
-
+                    input_value=input_value,
+                    nitrogenContent=nitrogenContent,
+                    nitrogenContent_min=nitrogenContent_min,
+                    nitrogenContent_max=nitrogenContent_max,
+                    phosphateContentAsP2O5=phosphateContentAsP2O5,
+                    phosphateContentAsP2O5_min=phosphateContentAsP2O5_min,
+                    phosphateContentAsP2O5_max=phosphateContentAsP2O5_max,
+                    potassiumContentAsK2O=potassiumContentAsK2O,
+                    potassiumContentAsK2O_min=potassiumContentAsK2O_min,
+                    potassiumContentAsK2O_max=potassiumContentAsK2O_max)
+
+        converter = VALUE_BY_UNIT.get(from_units, {}).get(to_units, lambda *args: None)
+        value = converter(
+            {
+                'value': input_value,
+                'nitrogenContent-multiply': nitrogenContent,
+                'nitrogenContent-divide': nitrogenContent,
+                'phosphateContentAsP2O5-multiply': phosphateContentAsP2O5,
+                'phosphateContentAsP2O5-divide': phosphateContentAsP2O5,
+                'potassiumContentAsK2O-multiply': potassiumContentAsK2O,
+                'potassiumContentAsK2O-divide': potassiumContentAsK2O,
+            }
         )
-        min =
-
+        min = converter(
+            {
+                'value': input_value,
+                'nitrogenContent-multiply': nitrogenContent_min,
+                'nitrogenContent-divide': nitrogenContent_max,
+                'phosphateContentAsP2O5-multiply': phosphateContentAsP2O5_min,
+                'phosphateContentAsP2O5-divide': phosphateContentAsP2O5_max,
+                'potassiumContentAsK2O-multiply': potassiumContentAsK2O_min,
+                'potassiumContentAsK2O-divide': potassiumContentAsK2O_max,
+            }
         ) if len(min_values) >= 2 else None
-        max =
-
+        max = converter(
+            {
+                'value': input_value,
+                'nitrogenContent-multiply': nitrogenContent_max,
+                'nitrogenContent-divide': nitrogenContent_min,
+                'phosphateContentAsP2O5-multiply': phosphateContentAsP2O5_max,
+                'phosphateContentAsP2O5-divide': phosphateContentAsP2O5_min,
+                'potassiumContentAsK2O-multiply': potassiumContentAsK2O_max,
+                'potassiumContentAsK2O-divide': potassiumContentAsK2O_min,
+            }
         ) if len(max_values) >= 2 else None
+
         return _input(input_term_id, value, min, max) if value else None

-    return list(map(include_input,
+    return list(map(include_input, input_term_ids))


 def _should_run_input(cycle: dict, input: dict):
@@ -130,7 +179,8 @@ def _should_run_input(cycle: dict, input: dict):
     # skip inputs that already have all the inlcuded term with a value
     inputs = cycle.get('inputs', [])
     include_term_ids = [
-        term_id for term_id in _include_term_ids(term_id)
+        term_id for term_id in _include_term_ids(term_id)
+        if len(find_term_match(inputs, term_id).get('value', [])) == 0
     ]
     should_run = all([
         has_value,
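Every lambda added to `VALUE_BY_UNIT` follows the same pattern: divide the input amount by the nutrient content of its own unit, then multiply by the content of the target unit; the min/max calls in `_run_input` feed opposite bounds into the divide/multiply slots so the range stays conservative. A worked call against a copy of the kg N → kg K2O lambda (the 15 % and 10 % contents are made-up lookup values):

```python
# Mirrors the diff's Units.KG_N -> Units.KG_K2O lambda.
to_k2o = lambda data: (
    data.get('value') / data.get('nitrogenContent-divide')
) * data.get('potassiumContentAsK2O-multiply')

# 30 kg N supplied by a product with 15% N and 10% K2O (illustrative contents only):
print(to_k2o({
    'value': 30,
    'nitrogenContent-divide': 15,
    'potassiumContentAsK2O-multiply': 10,
}))  # 20.0 -> 20 kg K2O
```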
hestia_earth/models/environmentalFootprintV3_1/environmentalFootprintSingleOverallScore.py

@@ -8,32 +8,33 @@ summed to obtain the EF single overall score. The number and the name of the imp
 in EF3.0 and EF3.1.
 """
 from typing import List, Optional, Tuple
-
 from hestia_earth.schema import TermTermType
-from hestia_earth.utils.lookup import get_table_value, download_lookup, column_name
 from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import list_sum

-from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
+from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
 from hestia_earth.models.utils.indicator import _new_indicator
 from hestia_earth.models.utils.lookup import _node_value
+from hestia_earth.models.utils.blank_node import get_lookup_value
 from . import MODEL

 REQUIREMENTS = {
     "ImpactAssessment": {
-        "impacts": [
-
-
-
-
-        }
-        ]
+        "impacts": [{
+            "@type": "Indicator",
+            "value": "",
+            "term.name": "PEF indicators only"
+        }]
     }
 }

 LOOKUPS = {
     "@doc": "Normalisation factors in PEF v3.1 are calculated using a Global population number of 6,895,889,018",
-    "characterisedIndicator": [
+    "characterisedIndicator": [
+        "pefTerm-normalisation-v3_1",
+        "pefTerm-weighing-v3_1",
+        "pefTerm-methodModel-whiteList-v3-1"
+    ]
 }

 RETURNS = {
@@ -46,32 +47,40 @@ TERM_ID = 'environmentalFootprintSingleOverallScore'

 normalisation_column = LOOKUPS['characterisedIndicator'][0]
 weighing_column = LOOKUPS['characterisedIndicator'][1]
+method_model_colum = LOOKUPS['characterisedIndicator'][2]


-def _is_a_PEF_indicator(
-
-
+def _is_a_PEF_indicator(indicator: dict) -> bool:
+    term = indicator.get('term', {})
+    indicator_method_model = indicator.get('methodModel', {}).get("@id")
+    return all([
+        indicator_method_model,
+        indicator_method_model in _get_pef_method_model(term),
+        _get_factor(term, normalisation_column) is not None,
+        _get_factor(term, weighing_column) is not None
+    ])


-def _get_factor(
-    factor =
-        'termid', indicator_id, column_name(column))
-    if factor is None:
-        debugMissingLookup(f"{list(LOOKUPS.keys())[1]}.csv", 'termid', indicator_id, column, None, model=MODEL,
-                           term=TERM_ID)
+def _get_factor(term: dict, column: str) -> Optional[float]:
+    factor = get_lookup_value(term, column, model=MODEL, term=TERM_ID)
     return float(factor) if factor is not None else None


+def _get_pef_method_model(term: dict) -> List[str]:
+    entries = get_lookup_value(term, method_model_colum, model=MODEL, term=TERM_ID) or ''
+    return entries.split(";")
+
+
 def _normalise(indicator: dict) -> Optional[float]:
-    return (
-
-
+    return (
+        _node_value(indicator) / _get_factor(indicator.get('term', {}), normalisation_column)
+    ) if (_node_value(indicator) is not None and _get_factor(indicator.get('term', {}), normalisation_column)) else None


 def _weighted_normalise(indicator: dict) -> Optional[float]:
-    return (
-
-
+    return (
+        _normalise(indicator) * (_get_factor(indicator.get('term', {}), weighing_column) / 100)
+    ) if (_normalise(indicator) is not None and _get_factor(indicator.get('term', {}), weighing_column)) else None


 def _indicator(value: float) -> dict:
@@ -85,17 +94,15 @@ def _run(indicators: List[dict]):


 def _valid_indicator(indicator: Optional[dict]) -> bool:
-
-
-        _node_value(indicator) is not None,
-        _is_a_PEF_indicator(indicator.get('term', {}).get('@id', ''))])
+    value = None if indicator is None else _node_value(indicator)
+    return isinstance(value, (int, float)) and _is_a_PEF_indicator(indicator)


 def _should_run(impact_assessment: dict) -> Tuple[bool, list[dict]]:
     indicators = [
         indicator for indicator in
         filter_list_term_type(impact_assessment.get('impacts', []), TermTermType.CHARACTERISEDINDICATOR)
-        if _is_a_PEF_indicator(indicator
+        if _is_a_PEF_indicator(indicator)
     ]

     has_pef_indicators = bool(indicators)
@@ -104,15 +111,13 @@ def _should_run(impact_assessment: dict) -> Tuple[bool, list[dict]]:
         "indicator": indicator,
         "valid-indicator": _valid_indicator(indicator),
         "one-indicator-for-category": sum(1 for i in indicators if i['term']['@id'] == indicator['term']['@id']) == 1,
-        "indicator-pef-category": indicator
+        "indicator-pef-category": indicator.get('term', {}).get('@id'),
         "value": _node_value(indicator),
         "normalised": _normalise(indicator),
-        "normalisation-used": _get_factor(indicator
+        "normalisation-used": _get_factor(indicator.get('term', {}), normalisation_column),
         "weighted-normalised": _weighted_normalise(indicator),
-        "weighting-used": _get_factor(indicator
-    }
-    for indicator in indicators
-    ]
+        "weighting-used": _get_factor(indicator.get('term', {}), weighing_column),
+    } for indicator in indicators]

     no_duplicate_indicators = all([indicator['one-indicator-for-category'] for indicator in processed_indicators])
     valid_indicators = [indicator for indicator in processed_indicators if indicator['valid-indicator']]
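The rewritten `_normalise`/`_weighted_normalise` pair implements the usual PEF aggregation: divide each indicator value by its normalisation factor, scale by its weighting percentage, and sum the weighted results into the single overall score. A compact numeric sketch of that arithmetic, with placeholder factors rather than the `pefTerm-*` lookup values:

```python
# Placeholder normalisation factors and weightings (%), keyed by indicator id.
FACTORS = {
    "climateChange": {"normalisation": 8000.0, "weighting": 21.0},
    "freshwaterEutrophication": {"normalisation": 1.6, "weighting": 2.8},
}

def single_overall_score(impacts: dict) -> float:
    # score = sum(value / normalisation * weighting / 100) over the PEF indicators
    return sum(
        value / FACTORS[term]["normalisation"] * FACTORS[term]["weighting"] / 100
        for term, value in impacts.items() if term in FACTORS
    )

print(single_overall_score({"climateChange": 1200.0, "freshwaterEutrophication": 0.02}))
# roughly 0.032 (dimensionless weighted points)
```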
hestia_earth/models/environmentalFootprintV3_1/soilQualityIndexLandTransformation.py

@@ -8,7 +8,7 @@ from typing import List, Tuple

 from hestia_earth.schema import TermTermType
 from hestia_earth.utils.lookup import download_lookup
-from hestia_earth.utils.model import filter_list_term_type
+from hestia_earth.utils.model import filter_list_term_type, find_term_match
 from hestia_earth.utils.tools import list_sum

 from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
@@ -26,7 +26,7 @@ REQUIREMENTS = {
             "@type": "Indicator",
             "term.termType": "resourceUse",
             "term.@id": "landTransformation20YearAverageDuringCycle",
-            "value": "
+            "value": ">= 0",
             "landCover": {"@type": "Term", "term.termType": "landCover"},
             "previousLandCover": {"@type": "Term", "term.termType": "landCover"}
         }
@@ -38,7 +38,7 @@ REQUIREMENTS = {
             "@type": "Indicator",
             "term.termType": "resourceUse",
             "term.@id": "landTransformation20YearAverageInputsProduction",
-            "value": "
+            "value": ">= 0",
             "landCover": {"@type": "Term", "term.termType": "landCover"},
             "previousLandCover": {"@type": "Term", "term.termType": "landCover"}
         }
@@ -80,7 +80,8 @@ def _indicator(value: float):

 def _run(transformations: List[dict]):
     values = [
-        (transformation.get(
+        (transformation.get("factor-from", 0) + transformation.get("factor-to", 0)
+         ) * transformation.get("value", 0) * 20
         for transformation in transformations
     ]
     return _indicator(list_sum(values))
@@ -92,19 +93,22 @@ def _is_valid_indicator(indicator: dict) -> bool:


 def _should_run(impact_assessment: dict) -> Tuple[bool, list]:
-    resource_uses =
+    resource_uses = [
+        i for i in filter_list_term_type(impact_assessment.get('emissionsResourceUse', []), TermTermType.RESOURCEUSE) if
+        _is_valid_indicator(i)
+    ]
+
     found_transformations = [
         {
-            '
+            'value': _node_value(transformation_indicator),
             'land-cover-id-from': transformation_indicator.get('previousLandCover', {}).get("@id"),
             'land-cover-id-to': transformation_indicator.get('landCover', {}).get("@id"),
             'indicator-id': transformation_indicator.get('term', {}).get('@id', ''),
-            'indicator-is-valid': _is_valid_indicator(transformation_indicator),
             'good-land-cover-term': all([bool(transformation_indicator.get('landCover')),
                                          bool(transformation_indicator.get('previousLandCover'))]),
             'country-id': get_country_id(impact_assessment),
-            '
-
+            'value-is-valid': (_node_value(transformation_indicator) is not None and
+                               _node_value(transformation_indicator) >= 0),
             'lookup-country': fallback_country(get_country_id(impact_assessment),
                                                [download_lookup(from_lookup_file), download_lookup(to_lookup_file)]),
         } for transformation_indicator in resource_uses
@@ -130,8 +134,7 @@ def _should_run(impact_assessment: dict) -> Tuple[bool, list]:

     valid_transformations_with_coef = [
         t for t in found_transformations_with_coefficient if all([
-            t['
-            t['indicator-is-valid'],
+            t['value-is-valid'],
             t['factor-from'] is not None,
             t['factor-to'] is not None
         ])
@@ -144,19 +147,24 @@ def _should_run(impact_assessment: dict) -> Tuple[bool, list]:

     all_transformations_are_valid = all(
         [
-            all([t['
+            all([t['value-is-valid'], t['good-land-cover-term']])
             for t in found_transformations_with_coefficient
         ]
     ) if found_transformations_with_coefficient else False

+    has_a_during_cycle_indicator = bool(find_term_match(resource_uses, "landTransformation20YearAverageDuringCycle"))
+
     logRequirements(impact_assessment, model=MODEL, term=TERM_ID,
-
+                    has_land_transformation_indicators=has_land_transformation_indicators,
+                    has_a_during_cycle_indicator=has_a_during_cycle_indicator,
                     all_transformations_are_valid=all_transformations_are_valid,
                     has_valid_transformations_with_coef=bool(valid_transformations_with_coef),
                     found_transformations=log_as_table(found_transformations_with_coefficient)
                     )

-    should_run = all([has_land_transformation_indicators,
+    should_run = all([has_land_transformation_indicators,
+                      has_a_during_cycle_indicator,
+                      all_transformations_are_valid])

     logShouldRun(impact_assessment, MODEL, TERM_ID, should_run)
     return should_run, valid_transformations_with_coef
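In the updated `_run`, each valid transformation contributes `(factor-from + factor-to) * value * 20` and the contributions are summed into the indicator. A worked example with invented coefficients and values:

```python
# Same arithmetic as the diff's _run (coefficients and values below are invented).
transformations = [
    {"factor-from": 0.5, "factor-to": 1.2, "value": 0.01},
    {"factor-from": 0.3, "factor-to": 0.9, "value": 0.002},
]
total = sum(
    (t.get("factor-from", 0) + t.get("factor-to", 0)) * t.get("value", 0) * 20
    for t in transformations
)
print(round(total, 3))  # 0.388
```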
hestia_earth/models/environmentalFootprintV3_1/soilQualityIndexTotalLandUseEffects.py

@@ -42,34 +42,36 @@ def _run(indicators: list):


 def _should_run(impactassessment: dict) -> tuple[bool, list]:
-
+    soil_quality_indicators = [
         i for i in impactassessment.get('emissionsResourceUse', []) if
         i.get('term', {}).get('@id', '') in ['soilQualityIndexLandOccupation', 'soilQualityIndexLandTransformation']
     ]
-
+    has_soil_quality_indicators = bool(soil_quality_indicators)

-
-
-
+    soil_quality_occupation_indicator = find_term_match(soil_quality_indicators, "soilQualityIndexLandOccupation",
+                                                        default_val=None)
+    has_soil_quality_land_occupation_indicator = bool(soil_quality_occupation_indicator)

-
-
-
+    soil_quality_transformation_indicator = find_term_match(soil_quality_indicators,
+                                                            "soilQualityIndexLandTransformation",
+                                                            default_val=None)
+    has_soil_quality_land_transformation_indicator = bool(soil_quality_transformation_indicator)

-    has_valid_values = all(
+    has_valid_values = all(
+        [isinstance(indicator.get('value', None), (int, float)) for indicator in soil_quality_indicators])

     logRequirements(impactassessment, model=MODEL, term=TERM_ID,
-
-
-
+                    has_soil_quality_indicators=has_soil_quality_indicators,
+                    has_soil_quality_land_occupation_indicator=has_soil_quality_land_occupation_indicator,
+                    has_soil_quality_land_transformation_indicator=has_soil_quality_land_transformation_indicator,
                     has_valid_values=has_valid_values
                     )

-    should_run = all([
-
+    should_run = all([has_soil_quality_indicators, has_valid_values,
+                      has_soil_quality_land_occupation_indicator, has_soil_quality_land_transformation_indicator])

     logShouldRun(impactassessment, MODEL, TERM_ID, should_run)
-    return should_run,
+    return should_run, soil_quality_indicators


 def run(impactassessment: dict):
hestia_earth/models/hestia/landTransformation100YearAverageDuringCycle.py

@@ -4,7 +4,7 @@ contained within the [ImpactAssesment.cycle](https://hestia.earth/schema/ImpactA
 100 years.

 It does this by multiplying the land occupation during the cycle by the
-[Site](https://
+[Site](https://hestia.earth/schema/Site) area 100 years ago and dividing by 100.

 Land transformation from [land type] 100 years =
 (Land occupation, during Cycle * Site Percentage Area 100 years ago [land type] / 100) / 100
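Worked example of the docstring formula above (both figures invented): with 5000 m²·year of land occupation during the cycle and a site that was 40 % under the given land type 100 years ago, the attributed transformation is (5000 * 40 / 100) / 100 = 20. The 20-year model that follows uses the same expression with a divisor of 20.

```python
land_occupation_during_cycle = 5000   # m2 * year, invented
site_percentage_area_100y_ago = 40    # % of the site under this land type 100 years ago, invented

land_transformation_100y = (land_occupation_during_cycle * site_percentage_area_100y_ago / 100) / 100
print(land_transformation_100y)  # 20.0
```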
hestia_earth/models/hestia/landTransformation20YearAverageDuringCycle.py

@@ -4,7 +4,7 @@ contained within the [ImpactAssesment.cycle](https://hestia.earth/schema/ImpactA
 20 years.

 It does this by multiplying the land occupation during the cycle by the
-[Site](https://
+[Site](https://hestia.earth/schema/Site) area 20 years ago and dividing by 20.

 Land transformation from [land type] 20 years =
 (Land occupation, during Cycle * Site Percentage Area 20 years ago [land type] / 100) / 20
hestia_earth/models/ipcc2019/aboveGroundBiomass.py

@@ -542,7 +542,7 @@ def _measurement(
     max : list[float]
         A list of maximum values representing the maximum modelled biomass stock for each year of the inventory.
     statsDefinition : str
-        The [statsDefinition](https://
+        The [statsDefinition](https://hestia.earth/schema/Measurement#statsDefinition) of the measurement.
     observations : list[int]
         The number of model iterations used to calculate the descriptive statistics.

hestia_earth/models/ipcc2019/belowGroundBiomass.py

@@ -525,7 +525,7 @@ def _measurement(
     max : list[float]
         A list of maximum values representing the maximum modelled biomass stock for each year of the inventory.
     statsDefinition : str
-        The [statsDefinition](https://
+        The [statsDefinition](https://hestia.earth/schema/Measurement#statsDefinition) of the measurement.
     observations : list[int]
         The number of model iterations used to calculate the descriptive statistics.

hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_1_utils.py

@@ -96,7 +96,7 @@ def _measurement(
     Build a HESTIA `Measurement` node to contain a value and descriptive statistics calculated by the models.

     The `descriptive_stats_dict` parameter should include the following keys and values from the
-    [Measurement](https://
+    [Measurement](https://hestia.earth/schema/Measurement) schema:
     ```
     {
         "value": list[float],
@@ -874,9 +874,9 @@ def _compile_inventory(
     site_id : str
         The `@id` of the site.
     site_type : str
-        A valid [site type](https://
+        A valid [site type](https://hestia.earth/schema/Site#siteType).
     management_nodes : list[dict]
-        A list of [Management nodes](https://
+        A list of [Management nodes](https://hestia.earth/schema/Management).
     ipcc_soil_category : IpccSoilCategory
         The site's assigned IPCC soil category.

@@ -949,7 +949,7 @@ def _assign_ipcc_soil_category(
     Parameters
     ----------
     measurement_nodes : list[dict]
-        List of A list of [Measurement nodes](https://
+        List of A list of [Measurement nodes](https://hestia.earth/schema/Measurement)..
     default : IpccSoilCategory, optional
         The default soil category if none matches, by default IpccSoilCategory.LOW_ACTIVITY_CLAY_SOILS.

hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_2_utils.py

@@ -110,7 +110,7 @@ def _measurement(
     Build a HESTIA `Measurement` node to contain a value and descriptive statistics calculated by the models.

     The `descriptive_stats_dict` parameter should include the following keys and values from the
-    [Measurement](https://
+    [Measurement](https://hestia.earth/schema/Measurement) schema:
     ```
     {
         "value": list[float],