hestia-earth-models 0.66.0__py3-none-any.whl → 0.67.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. hestia_earth/models/cml2001Baseline/abioticResourceDepletionFossilFuels.py +23 -54
  2. hestia_earth/models/cml2001Baseline/resourceUseEnergyDepletionDuringCycle.py +147 -0
  3. hestia_earth/models/cml2001Baseline/resourceUseEnergyDepletionInputsProduction.py +40 -0
  4. hestia_earth/models/cml2001Baseline/resourceUseMineralsAndMetalsDuringCycle.py +80 -0
  5. hestia_earth/models/cml2001Baseline/resourceUseMineralsAndMetalsInputsProduction.py +40 -0
  6. hestia_earth/models/config/ImpactAssessment.json +1869 -1846
  7. hestia_earth/models/cycle/completeness/freshForage.py +7 -3
  8. hestia_earth/models/cycle/inorganicFertiliser.py +67 -17
  9. hestia_earth/models/cycle/input/hestiaAggregatedData.py +13 -10
  10. hestia_earth/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/__init__.py +4 -3
  11. hestia_earth/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/environmentalFootprintSingleOverallScore.py +42 -37
  12. hestia_earth/models/environmentalFootprintV3_1/marineEutrophicationPotential.py +36 -0
  13. hestia_earth/models/environmentalFootprintV3_1/scarcityWeightedWaterUse.py +40 -0
  14. hestia_earth/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/soilQualityIndexLandTransformation.py +22 -14
  15. hestia_earth/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/soilQualityIndexTotalLandUseEffects.py +17 -15
  16. hestia_earth/models/hestia/landTransformation100YearAverageDuringCycle.py +1 -1
  17. hestia_earth/models/hestia/landTransformation20YearAverageDuringCycle.py +1 -1
  18. hestia_earth/models/impact_assessment/product/value.py +1 -1
  19. hestia_earth/models/ipcc2019/aboveGroundBiomass.py +2 -2
  20. hestia_earth/models/ipcc2019/belowGroundBiomass.py +2 -2
  21. hestia_earth/models/ipcc2019/co2ToAirCarbonStockChange_utils.py +2 -1
  22. hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_1_utils.py +6 -5
  23. hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_2_utils.py +3 -2
  24. hestia_earth/models/mocking/search-results.json +1200 -1068
  25. hestia_earth/models/site/management.py +2 -2
  26. hestia_earth/models/utils/__init__.py +6 -0
  27. hestia_earth/models/utils/aggregated.py +13 -10
  28. hestia_earth/models/utils/array_builders.py +4 -3
  29. hestia_earth/models/utils/blank_node.py +78 -21
  30. hestia_earth/models/utils/ecoClimateZone.py +2 -2
  31. hestia_earth/models/utils/impact_assessment.py +5 -4
  32. hestia_earth/models/utils/lookup.py +5 -5
  33. hestia_earth/models/utils/property.py +5 -2
  34. hestia_earth/models/version.py +1 -1
  35. hestia_earth/orchestrator/log.py +11 -0
  36. hestia_earth/orchestrator/models/__init__.py +8 -3
  37. {hestia_earth_models-0.66.0.dist-info → hestia_earth_models-0.67.1.dist-info}/METADATA +1 -1
  38. {hestia_earth_models-0.66.0.dist-info → hestia_earth_models-0.67.1.dist-info}/RECORD +64 -52
  39. tests/models/cml2001Baseline/test_abioticResourceDepletionFossilFuels.py +51 -87
  40. tests/models/cml2001Baseline/test_resourceUseEnergyDepletionDuringCycle.py +136 -0
  41. tests/models/cml2001Baseline/test_resourceUseEnergyDepletionInputsProduction.py +23 -0
  42. tests/models/cml2001Baseline/test_resourceUseMineralsAndMetalsDuringCycle.py +58 -0
  43. tests/models/cml2001Baseline/test_resourceUseMineralsAndMetalsInputsProduction.py +23 -0
  44. tests/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/test_environmentalFootprintSingleOverallScore.py +43 -12
  45. tests/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/test_freshwaterEcotoxicityPotentialCtue.py +6 -5
  46. tests/models/environmentalFootprintV3_1/test_marineEutrophicationPotential.py +27 -0
  47. tests/models/environmentalFootprintV3_1/test_scarcityWeightedWaterUse.py +32 -0
  48. tests/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/test_soilQualityIndexLandOccupation.py +4 -3
  49. tests/models/environmentalFootprintV3_1/test_soilQualityIndexLandTransformation.py +194 -0
  50. tests/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/test_soilQualityIndexTotalLandUseEffects.py +4 -4
  51. tests/models/impact_assessment/test_emissions.py +0 -1
  52. tests/models/site/test_management.py +1 -4
  53. tests/models/test_config.py +3 -3
  54. tests/models/test_ecoinventV3.py +0 -1
  55. tests/models/utils/test_array_builders.py +2 -2
  56. tests/models/utils/test_blank_node.py +13 -165
  57. tests/orchestrator/models/test_transformations.py +4 -1
  58. tests/models/environmentalFootprintV3/test_soilQualityIndexLandTransformation.py +0 -164
  59. /hestia_earth/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/freshwaterEcotoxicityPotentialCtue.py +0 -0
  60. /hestia_earth/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/soilQualityIndexLandOccupation.py +0 -0
  61. /hestia_earth/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/utils.py +0 -0
  62. {hestia_earth_models-0.66.0.dist-info → hestia_earth_models-0.67.1.dist-info}/LICENSE +0 -0
  63. {hestia_earth_models-0.66.0.dist-info → hestia_earth_models-0.67.1.dist-info}/WHEEL +0 -0
  64. {hestia_earth_models-0.66.0.dist-info → hestia_earth_models-0.67.1.dist-info}/top_level.txt +0 -0
  65. /tests/models/{environmentalFootprintV3 → environmentalFootprintV3_1}/__init__.py +0 -0
@@ -4,7 +4,8 @@ Completeness Fresh Forage
 This model checks if we have the requirements below and updates the
 [Data Completeness](https://hestia.earth/schema/Completeness#cropResidue) value.
 """
-from hestia_earth.schema import SiteSiteType
+from hestia_earth.schema import SiteSiteType, TermTermType
+from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import list_sum
 
 from hestia_earth.models.log import logRequirements
@@ -46,18 +47,21 @@ ALLOWED_SITE_TYPES = [
 def _valid_input(input: dict): return is_from_model(input) and list_sum(input.get('value', [-1])) >= 0
 
 
+def _inputs(node: dict): return filter_list_term_type(node.get('inputs', []), TermTermType.FORAGE)
+
+
 def run(cycle: dict):
     site_type = cycle.get('site', {}).get('siteType')
     site_type_allowed = site_type in ALLOWED_SITE_TYPES
 
-    cycle_has_added_forage_input = any(map(_valid_input, cycle.get('inputs', [])))
+    cycle_has_added_forage_input = any(map(_valid_input, _inputs(cycle)))
 
     animals = [
         a for a in cycle.get('animals', [])
         if get_lookup_value(a.get('term', {}), 'isGrazingAnimal', model=MODEL, key=MODEL_KEY)
     ]
     all_animals_have_added_forage_input = bool(animals) and all([
-        any(map(_valid_input, animal.get('inputs', []))) for animal in animals
+        any(map(_valid_input, _inputs(animal))) for animal in animals
     ])
 
     logRequirements(cycle, model=MODEL, term=None, key=MODEL_KEY,
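Note on the freshForage change above: the completeness check now only counts inputs whose term is of type forage, rather than any input on the Cycle or animal. A minimal, self-contained sketch of that filtering step (plain dicts stand in for HESTIA nodes; the term ids and the lower-case 'forage' termType value are illustrative assumptions):

```python
# Hypothetical stand-in for _inputs(node), which uses
# filter_list_term_type(node.get('inputs', []), TermTermType.FORAGE).
def forage_inputs(node: dict) -> list:
    return [
        i for i in node.get('inputs', [])
        if i.get('term', {}).get('termType') == 'forage'
    ]

cycle = {'inputs': [
    {'term': {'@id': 'freshGrass', 'termType': 'forage'}, 'value': [5]},
    {'term': {'@id': 'wheatGrain', 'termType': 'crop'}, 'value': [2]},
]}
print([i['term']['@id'] for i in forage_inputs(cycle)])  # ['freshGrass']
```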
@@ -49,16 +49,28 @@ UNITS = [
 ]
 VALUE_BY_UNIT = {
     Units.KG_N.value: {
-        Units.KG_K2O.value: lambda value, nContent, p2O5Content, k2OContent: value * k2OContent / nContent,
-        Units.KG_P2O5.value: lambda value, nContent, p2O5Content, k2OContent: value * p2O5Content / nContent
+        Units.KG_K2O.value: lambda data: (
+            data.get('value') / data.get('nitrogenContent-divide')
+        ) * data.get('potassiumContentAsK2O-multiply'),
+        Units.KG_P2O5.value: lambda data: (
+            data.get('value') / data.get('nitrogenContent-divide')
+        ) * data.get('phosphateContentAsP2O5-multiply')
     },
     Units.KG_K2O.value: {
-        Units.KG_N.value: lambda value, nContent, p2O5Content, k2OContent: value / k2OContent * nContent,
-        Units.KG_P2O5.value: lambda value, nContent, p2O5Content, k2OContent: value / k2OContent * p2O5Content
+        Units.KG_N.value: lambda data: (
+            data.get('value') / data.get('potassiumContentAsK2O-divide')
+        ) * data.get('nitrogenContent-multiply'),
+        Units.KG_P2O5.value: lambda data: (
+            data.get('value') / data.get('potassiumContentAsK2O-divide')
+        ) * data.get('phosphateContentAsP2O5-multiply')
     },
     Units.KG_P2O5.value: {
-        Units.KG_N.value: lambda value, nContent, p2O5Content, k2OContent: value / p2O5Content * nContent,
-        Units.KG_K2O.value: lambda value, nContent, p2O5Content, k2OContent: value / p2O5Content * k2OContent
+        Units.KG_N.value: lambda data: (
+            data.get('value') / data.get('phosphateContentAsP2O5-divide')
+        ) * data.get('nitrogenContent-multiply'),
+        Units.KG_K2O.value: lambda data: (
+            data.get('value') / data.get('phosphateContentAsP2O5-divide')
+        ) * data.get('potassiumContentAsK2O-multiply')
     }
 }
 
@@ -81,6 +93,7 @@ def _include_term_ids(term_id: str):
 
 def _run_input(cycle: dict, input: dict):
     term_id = input.get('term', {}).get('@id')
+    input_term_ids = _include_term_ids(term_id)
     nitrogenContent = safe_parse_float(get_term_lookup(term_id, 'nitrogenContent'), 0)
     nitrogenContent_min = safe_parse_float(get_term_lookup(term_id, 'nitrogenContent-min'), None)
     nitrogenContent_max = safe_parse_float(get_term_lookup(term_id, 'nitrogenContent-max'), None)
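The nitrogen, P2O5 and K2O contents pulled from the lookups above feed the rewritten VALUE_BY_UNIT lambdas, which now take a single dict keyed by lookup name plus a `-multiply`/`-divide` suffix instead of positional arguments. A worked example of the new calling convention, with invented contents:

```python
# Illustrative only: the kg N -> kg K2O converter from VALUE_BY_UNIT above,
# called with made-up numbers. 100 kg N at 20% N and 10% K2O content:
# 100 / 20 * 10 = 50 kg K2O.
kg_n_to_kg_k2o = lambda data: (
    data.get('value') / data.get('nitrogenContent-divide')
) * data.get('potassiumContentAsK2O-multiply')

print(kg_n_to_kg_k2o({
    'value': 100,
    'nitrogenContent-divide': 20,
    'potassiumContentAsK2O-multiply': 10,
}))  # 50.0
```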
@@ -96,28 +109,64 @@ def _run_input(cycle: dict, input: dict):
     min_values = non_empty_list([nitrogenContent_min, phosphateContentAsP2O5_min, potassiumContentAsK2O_min])
     max_values = non_empty_list([nitrogenContent_max, phosphateContentAsP2O5_max, potassiumContentAsK2O_max])
 
-    def include_input(input_term_id):
+    def include_input(input_term_id: str):
         to_units = Units.KG_N.value if input_term_id.endswith('KgN') else (
             Units.KG_K2O.value if input_term_id.endswith('KgK2O') else Units.KG_P2O5.value
         )
 
         debugValues(cycle, model=MODEL_LOG, term=input_term_id,
+                    from_input_id=term_id,
                     from_units=from_units,
                     to_units=to_units,
-                    input_value=input_value)
-
-        value = VALUE_BY_UNIT.get(from_units, {}).get(to_units, lambda *args: None)(
-            input_value, nitrogenContent, phosphateContentAsP2O5, potassiumContentAsK2O
+                    input_value=input_value,
+                    nitrogenContent=nitrogenContent,
+                    nitrogenContent_min=nitrogenContent_min,
+                    nitrogenContent_max=nitrogenContent_max,
+                    phosphateContentAsP2O5=phosphateContentAsP2O5,
+                    phosphateContentAsP2O5_min=phosphateContentAsP2O5_min,
+                    phosphateContentAsP2O5_max=phosphateContentAsP2O5_max,
+                    potassiumContentAsK2O=potassiumContentAsK2O,
+                    potassiumContentAsK2O_min=potassiumContentAsK2O_min,
+                    potassiumContentAsK2O_max=potassiumContentAsK2O_max)
+
+        converter = VALUE_BY_UNIT.get(from_units, {}).get(to_units, lambda *args: None)
+        value = converter(
+            {
+                'value': input_value,
+                'nitrogenContent-multiply': nitrogenContent,
+                'nitrogenContent-divide': nitrogenContent,
+                'phosphateContentAsP2O5-multiply': phosphateContentAsP2O5,
+                'phosphateContentAsP2O5-divide': phosphateContentAsP2O5,
+                'potassiumContentAsK2O-multiply': potassiumContentAsK2O,
+                'potassiumContentAsK2O-divide': potassiumContentAsK2O,
+            }
         )
-        min = VALUE_BY_UNIT.get(from_units, {}).get(to_units, lambda *args: None)(
-            input_value, nitrogenContent_min, phosphateContentAsP2O5_min, potassiumContentAsK2O_min
+        min = converter(
+            {
+                'value': input_value,
+                'nitrogenContent-multiply': nitrogenContent_min,
+                'nitrogenContent-divide': nitrogenContent_max,
+                'phosphateContentAsP2O5-multiply': phosphateContentAsP2O5_min,
+                'phosphateContentAsP2O5-divide': phosphateContentAsP2O5_max,
+                'potassiumContentAsK2O-multiply': potassiumContentAsK2O_min,
+                'potassiumContentAsK2O-divide': potassiumContentAsK2O_max,
+            }
         ) if len(min_values) >= 2 else None
-        max = VALUE_BY_UNIT.get(from_units, {}).get(to_units, lambda *args: None)(
-            input_value, nitrogenContent_max, phosphateContentAsP2O5_max, potassiumContentAsK2O_max
+        max = converter(
+            {
+                'value': input_value,
+                'nitrogenContent-multiply': nitrogenContent_max,
+                'nitrogenContent-divide': nitrogenContent_min,
+                'phosphateContentAsP2O5-multiply': phosphateContentAsP2O5_max,
+                'phosphateContentAsP2O5-divide': phosphateContentAsP2O5_min,
+                'potassiumContentAsK2O-multiply': potassiumContentAsK2O_max,
+                'potassiumContentAsK2O-divide': potassiumContentAsK2O_min,
+            }
         ) if len(max_values) >= 2 else None
+
         return _input(input_term_id, value, min, max) if value else None
 
-    return list(map(include_input, _include_term_ids(term_id)))
+    return list(map(include_input, input_term_ids))
 
 
 def _should_run_input(cycle: dict, input: dict):
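The `-multiply`/`-divide` split explains the deliberate swap in the min/max calls above: for a conversion of the form value / divisor * multiplier, the lower bound pairs the minimum multiplier with the maximum divisor, and the upper bound the reverse. A small numeric sketch of that interval reasoning (numbers invented):

```python
# Interval bounds for value / divisor * multiplier:
# the smallest result uses the largest divisor and smallest multiplier.
value = 100
divisor_min, divisor_max = 15, 25        # e.g. nitrogenContent-min / -max
multiplier_min, multiplier_max = 8, 12   # e.g. potassiumContentAsK2O-min / -max

low = value / divisor_max * multiplier_min   # 100 / 25 * 8  = 32.0
high = value / divisor_min * multiplier_max  # 100 / 15 * 12 = 80.0
print(low, high)
```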
@@ -130,7 +179,8 @@ def _should_run_input(cycle: dict, input: dict):
     # skip inputs that already have all the inlcuded term with a value
     inputs = cycle.get('inputs', [])
     include_term_ids = [
-        term_id for term_id in _include_term_ids(term_id) if len(find_term_match(inputs, term_id).get('value', [])) == 0
+        term_id for term_id in _include_term_ids(term_id)
+        if len(find_term_match(inputs, term_id).get('value', [])) == 0
     ]
     should_run = all([
         has_value,
@@ -77,19 +77,22 @@ def _run_seed(cycle: dict, primary_product: dict, seed_input: dict, product_term
     # to avoid double counting seed => aggregated impact => seed, we need to get the impact of the previous decade
     # if the data does not exist, use the aggregated impact of generic crop instead
     date = aggregated_end_date(cycle.get('endDate'))
-    match_end_date = [
-        {'match': {'endDate': date - 10}}
-    ]
+    match_end_date = [{'match': {'endDate': date - 10}}]
+    default_product = get_generic_crop()
 
-    impact = find_closest_impact(cycle, date, {'term': product}, region, country, match_end_date) or \
-        find_closest_impact(cycle, date, primary_product, region, country, match_end_date) or \
-        find_closest_impact(cycle, date, {'term': get_generic_crop()}, region, country)
+    impact = (
+        find_closest_impact(cycle, date, product, region, country, match_end_date) or
+        find_closest_impact(cycle, date, primary_product.get('term', {}), region, country, match_end_date) or
+        find_closest_impact(cycle, date, default_product, region, country)
+    )
 
+    search_by_product_term_id = (product or primary_product or default_product).get('@id')
+    search_by_region_id = (region or country or {}).get('@id') or 'region-world'
     debugValues(cycle, model=MODEL_ID, term=seed_input.get('term', {}).get('@id'), key=MODEL_KEY,
-                input_region=(region or {}).get('@id'),
-                input_country=(country or {}).get('@id'),
-                date=date,
-                impact=(impact or {}).get('@id'))
+                search_by_product_term_id=search_by_product_term_id,
+                search_by_region_id=search_by_region_id,
+                search_by_end_date=str(date),
+                impact_assessment_id_found=(impact or {}).get('@id'))
 
     return seed_input | {MODEL_KEY: linked_node(impact), 'impactAssessmentIsProxy': True} if impact else None
 
@@ -1,13 +1,14 @@
-from os.path import dirname, abspath
 import sys
 from importlib import import_module
+from os.path import dirname, abspath
 
 from hestia_earth.models.utils.blank_node import run_if_required
 
 CURRENT_DIR = dirname(abspath(__file__)) + '/'
 sys.path.append(CURRENT_DIR)
-MODEL = 'environmentalFootprintV3'
-PKG = '.'.join(['hestia_earth', 'models', MODEL])
+MODEL = 'environmentalFootprintV3-1'
+MODEL_FOLDER = MODEL.replace('-', '_')
+PKG = '.'.join(['hestia_earth', 'models', MODEL_FOLDER])
 
 
 def run(model: str, data): return run_if_required(MODEL, model, data, import_module(f".{model}", package=PKG))
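The renamed module keeps a dash in the model id (`environmentalFootprintV3-1`) while the package folder uses an underscore (`environmentalFootprintV3_1`), hence the new `MODEL_FOLDER` indirection before building the import path. A sketch of how a sub-model name resolves to a module path (printing only; the actual import requires the package to be installed):

```python
# How the dash-named model id maps onto the underscore package path used by run().
MODEL = 'environmentalFootprintV3-1'
MODEL_FOLDER = MODEL.replace('-', '_')
PKG = '.'.join(['hestia_earth', 'models', MODEL_FOLDER])

# run('scarcityWeightedWaterUse', data) would import, via
# importlib.import_module(".scarcityWeightedWaterUse", package=PKG):
print(f"{PKG}.scarcityWeightedWaterUse")
# hestia_earth.models.environmentalFootprintV3_1.scarcityWeightedWaterUse
```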
@@ -8,32 +8,33 @@ summed to obtain the EF single overall score. The number and the name of the imp
 in EF3.0 and EF3.1.
 """
 from typing import List, Optional, Tuple
-
 from hestia_earth.schema import TermTermType
-from hestia_earth.utils.lookup import get_table_value, download_lookup, column_name
 from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import list_sum
 
-from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table, debugMissingLookup
+from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
 from hestia_earth.models.utils.indicator import _new_indicator
 from hestia_earth.models.utils.lookup import _node_value
+from hestia_earth.models.utils.blank_node import get_lookup_value
 from . import MODEL
 
 REQUIREMENTS = {
     "ImpactAssessment": {
-        "impacts": [
-            {
-                "@type": "Indicator",
-                "value": "",
-                "term.name": "PEF indicators only"
-            }
-        ]
+        "impacts": [{
+            "@type": "Indicator",
+            "value": "",
+            "term.name": "PEF indicators only"
+        }]
     }
 }
 
 LOOKUPS = {
     "@doc": "Normalisation factors in PEF v3.1 are calculated using a Global population number of 6,895,889,018",
-    "characterisedIndicator": ["pefTerm-normalisation-v3_1", "pefTerm-weighing-v3_1"]
+    "characterisedIndicator": [
+        "pefTerm-normalisation-v3_1",
+        "pefTerm-weighing-v3_1",
+        "pefTerm-methodModel-whiteList-v3-1"
+    ]
 }
 
 RETURNS = {
@@ -46,32 +47,40 @@ TERM_ID = 'environmentalFootprintSingleOverallScore'
 
 normalisation_column = LOOKUPS['characterisedIndicator'][0]
 weighing_column = LOOKUPS['characterisedIndicator'][1]
+method_model_colum = LOOKUPS['characterisedIndicator'][2]
 
 
-def _is_a_PEF_indicator(indicator_id) -> bool:
-    return (_get_factor(indicator_id, normalisation_column) not in [None, 0, 0.0] and
-            _get_factor(indicator_id, weighing_column) not in [None, 0, 0.0])
+def _is_a_PEF_indicator(indicator: dict) -> bool:
+    term = indicator.get('term', {})
+    indicator_method_model = indicator.get('methodModel', {}).get("@id")
+    return all([
+        indicator_method_model,
+        indicator_method_model in _get_pef_method_model(term),
+        _get_factor(term, normalisation_column) is not None,
+        _get_factor(term, weighing_column) is not None
+    ])
 
 
-def _get_factor(indicator_id: str, column) -> Optional[float]:
-    factor = get_table_value(download_lookup(f"{list(LOOKUPS.keys())[1]}.csv", keep_in_memory=True),
-                             'termid', indicator_id, column_name(column))
-    if factor is None:
-        debugMissingLookup(f"{list(LOOKUPS.keys())[1]}.csv", 'termid', indicator_id, column, None, model=MODEL,
-                           term=TERM_ID)
+def _get_factor(term: dict, column: str) -> Optional[float]:
+    factor = get_lookup_value(term, column, model=MODEL, term=TERM_ID)
     return float(factor) if factor is not None else None
 
 
+def _get_pef_method_model(term: dict) -> List[str]:
+    entries = get_lookup_value(term, method_model_colum, model=MODEL, term=TERM_ID) or ''
+    return entries.split(";")
+
+
 def _normalise(indicator: dict) -> Optional[float]:
-    return (_node_value(indicator) / _get_factor(indicator['term']['@id'], normalisation_column)) \
-        if (_node_value(indicator) is not None and
-            _get_factor(indicator['term']['@id'], normalisation_column) not in [None, 0, 0.0]) else None
+    return (
+        _node_value(indicator) / _get_factor(indicator.get('term', {}), normalisation_column)
+    ) if (_node_value(indicator) is not None and _get_factor(indicator.get('term', {}), normalisation_column)) else None
 
 
 def _weighted_normalise(indicator: dict) -> Optional[float]:
-    return (_normalise(indicator) * (_get_factor(indicator['term']['@id'], weighing_column) / 100)
-            ) if (_normalise(indicator) is not None and
-                  _get_factor(indicator['term']['@id'], weighing_column) not in [None, 0, 0.0]) else None
+    return (
+        _normalise(indicator) * (_get_factor(indicator.get('term', {}), weighing_column) / 100)
+    ) if (_normalise(indicator) is not None and _get_factor(indicator.get('term', {}), weighing_column)) else None
 
 
 def _indicator(value: float) -> dict:
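As the module docstring notes, each PEF indicator value is divided by its normalisation factor, scaled by its weighting percentage, and the weighted results are summed into the single overall score. A worked sketch of that arithmetic with invented factors (the real ones come from the pefTerm-normalisation-v3_1 and pefTerm-weighing-v3_1 lookups):

```python
# Invented numbers, purely to show the arithmetic of _normalise,
# _weighted_normalise and the final sum performed in _run.
indicators = [
    {'value': 4.2,  'normalisation': 8.1e3, 'weighting': 21.06},
    {'value': 0.37, 'normalisation': 5.6e1, 'weighting': 6.31},
]

weighted = [
    (i['value'] / i['normalisation']) * (i['weighting'] / 100)
    for i in indicators
]
print(sum(weighted))  # the environmentalFootprintSingleOverallScore value
```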
@@ -85,17 +94,15 @@ def _run(indicators: List[dict]):
 
 
 def _valid_indicator(indicator: Optional[dict]) -> bool:
-    return all([indicator is not None,
-                isinstance(_node_value(indicator), (int, float)),
-                _node_value(indicator) is not None,
-                _is_a_PEF_indicator(indicator.get('term', {}).get('@id', ''))])
+    value = None if indicator is None else _node_value(indicator)
+    return isinstance(value, (int, float)) and _is_a_PEF_indicator(indicator)
 
 
 def _should_run(impact_assessment: dict) -> Tuple[bool, list[dict]]:
     indicators = [
         indicator for indicator in
         filter_list_term_type(impact_assessment.get('impacts', []), TermTermType.CHARACTERISEDINDICATOR)
-        if _is_a_PEF_indicator(indicator.get('term', {}).get('@id', ''))
+        if _is_a_PEF_indicator(indicator)
     ]
 
     has_pef_indicators = bool(indicators)
@@ -104,15 +111,13 @@ def _should_run(impact_assessment: dict) -> Tuple[bool, list[dict]]:
         "indicator": indicator,
         "valid-indicator": _valid_indicator(indicator),
         "one-indicator-for-category": sum(1 for i in indicators if i['term']['@id'] == indicator['term']['@id']) == 1,
-        "indicator-pef-category": indicator['term']['@id'],
+        "indicator-pef-category": indicator.get('term', {}).get('@id'),
         "value": _node_value(indicator),
         "normalised": _normalise(indicator),
-        "normalisation-used": _get_factor(indicator['term']['@id'], normalisation_column),
+        "normalisation-used": _get_factor(indicator.get('term', {}), normalisation_column),
         "weighted-normalised": _weighted_normalise(indicator),
-        "weighting-used": _get_factor(indicator['term']['@id'], weighing_column),
-    }
-    for indicator in indicators
-    ]
+        "weighting-used": _get_factor(indicator.get('term', {}), weighing_column),
+    } for indicator in indicators]
 
     no_duplicate_indicators = all([indicator['one-indicator-for-category'] for indicator in processed_indicators])
     valid_indicators = [indicator for indicator in processed_indicators if indicator['valid-indicator']]
@@ -0,0 +1,36 @@
+from hestia_earth.models.log import logRequirements, logShouldRun
+from hestia_earth.models.utils.impact_assessment import impact_emission_lookup_value
+from hestia_earth.models.utils.indicator import _new_indicator
+from . import MODEL
+
+REQUIREMENTS = {
+    "ImpactAssessment": {
+        "emissionsResourceUse": [{"@type": "Indicator", "value": "", "term.termType": "emission"}]
+    }
+}
+
+RETURNS = {
+    "Indicator": {
+        "value": ""
+    }
+}
+
+LOOKUPS = {
+    "emission": "nEqMarineEutrophicationEnvironmentalFootprintV3"
+}
+
+TERM_ID = 'marineEutrophicationPotential'
+
+
+def _indicator(value: float):
+    indicator = _new_indicator(TERM_ID, MODEL)
+    indicator['value'] = value
+    return indicator
+
+
+def run(impact_assessment: dict):
+    value = impact_emission_lookup_value(MODEL, TERM_ID, impact_assessment, LOOKUPS['emission'])
+    logRequirements(impact_assessment, model=MODEL, term=TERM_ID,
+                    value=value)
+    logShouldRun(impact_assessment, MODEL, TERM_ID, value is not None)
+    return _indicator(value) if value is not None else None
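A hedged sketch of what the new marineEutrophicationPotential model computes. Assumption (not confirmed by this diff): `impact_emission_lookup_value` multiplies each emission indicator's value by the term's factor in the named lookup column and sums the results; the emission ids and factors below are invented:

```python
# Stand-in for the lookup-and-sum done by impact_emission_lookup_value
# against the nEqMarineEutrophicationEnvironmentalFootprintV3 column.
emissions = [
    {'term': {'@id': 'no3ToWaterInorganicFertiliser'}, 'value': 2.0},
    {'term': {'@id': 'nh3ToAirInorganicFertiliser'}, 'value': 1.0},
]
factors = {  # invented characterisation factors
    'no3ToWaterInorganicFertiliser': 0.23,
    'nh3ToAirInorganicFertiliser': 0.092,
}
value = sum(e['value'] * factors[e['term']['@id']] for e in emissions)
print(value)  # 0.552 -> run() would wrap this in a marineEutrophicationPotential Indicator
```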
@@ -0,0 +1,40 @@
+from hestia_earth.models.log import logRequirements, logShouldRun
+from . import MODEL
+from ..utils.impact_assessment import impact_country_value
+from ..utils.indicator import _new_indicator
+
+REQUIREMENTS = {
+    "ImpactAssessment": {
+        "emissionsResourceUse": [{"@type": "Indicator",
+                                  "term.@id": "freshwaterWithdrawalsDuringCycle",
+                                  "value": ""
+                                  }],
+        "optional": {"country": {"@type": "Term", "termType": "region"}}
+    }
+}
+
+LOOKUPS = {
+    "region-resourceUse-environmentalFootprintV31WaterUse": ""
+}
+
+RETURNS = {
+    "Indicator": {
+        "value": ""
+    }
+}
+TERM_ID = 'scarcityWeightedWaterUse'
+
+
+def _indicator(value: float):
+    indicator = _new_indicator(TERM_ID, MODEL)
+    indicator['value'] = value
+    return indicator
+
+
+def run(impact_assessment: dict):
+    value = impact_country_value(MODEL, TERM_ID, impact_assessment, f"{list(LOOKUPS.keys())[0]}.csv",
+                                 country_fallback=True)
+    logRequirements(impact_assessment, model=MODEL, term=TERM_ID,
+                    value=value)
+    logShouldRun(impact_assessment, MODEL, TERM_ID, value is not None)
+    return None if value is None else _indicator(value)
@@ -8,7 +8,7 @@ from typing import List, Tuple
 
 from hestia_earth.schema import TermTermType
 from hestia_earth.utils.lookup import download_lookup
-from hestia_earth.utils.model import filter_list_term_type
+from hestia_earth.utils.model import filter_list_term_type, find_term_match
 from hestia_earth.utils.tools import list_sum
 
 from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
@@ -26,7 +26,7 @@ REQUIREMENTS = {
             "@type": "Indicator",
             "term.termType": "resourceUse",
             "term.@id": "landTransformation20YearAverageDuringCycle",
-            "value": "> 0",
+            "value": ">= 0",
            "landCover": {"@type": "Term", "term.termType": "landCover"},
             "previousLandCover": {"@type": "Term", "term.termType": "landCover"}
         }
@@ -38,7 +38,7 @@ REQUIREMENTS = {
             "@type": "Indicator",
             "term.termType": "resourceUse",
             "term.@id": "landTransformation20YearAverageInputsProduction",
-            "value": "> 0",
+            "value": ">= 0",
             "landCover": {"@type": "Term", "term.termType": "landCover"},
             "previousLandCover": {"@type": "Term", "term.termType": "landCover"}
         }
@@ -80,7 +80,8 @@ def _indicator(value: float):
 
 def _run(transformations: List[dict]):
     values = [
-        (transformation.get('factor-from', 0) + transformation.get('factor-to', 0)) * transformation.get('area', 0) * 20
+        (transformation.get("factor-from", 0) + transformation.get("factor-to", 0)
+         ) * transformation.get("value", 0) * 20
         for transformation in transformations
     ]
     return _indicator(list_sum(values))
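Each transformation now contributes (factor-from + factor-to) multiplied by the indicator value (the transformed area, renamed from 'area' to 'value') and by the 20-year window, and the contributions are summed. A worked sketch with invented factors and areas:

```python
# Invented characterisation factors and values, mirroring the sum in _run above.
transformations = [
    {'factor-from': 12.0, 'factor-to': -3.0, 'value': 0.05},
    {'factor-from': 4.5,  'factor-to': 1.5,  'value': 0.10},
]
values = [
    (t['factor-from'] + t['factor-to']) * t['value'] * 20
    for t in transformations
]
print(sum(values))  # (9.0 * 0.05 + 6.0 * 0.10) * 20 = 21.0
```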
@@ -92,19 +93,22 @@ def _is_valid_indicator(indicator: dict) -> bool:
 
 
 def _should_run(impact_assessment: dict) -> Tuple[bool, list]:
-    resource_uses = filter_list_term_type(impact_assessment.get('emissionsResourceUse', []), TermTermType.RESOURCEUSE)
+    resource_uses = [
+        i for i in filter_list_term_type(impact_assessment.get('emissionsResourceUse', []), TermTermType.RESOURCEUSE) if
+        _is_valid_indicator(i)
+    ]
+
     found_transformations = [
         {
-            'area': _node_value(transformation_indicator),
+            'value': _node_value(transformation_indicator),
             'land-cover-id-from': transformation_indicator.get('previousLandCover', {}).get("@id"),
             'land-cover-id-to': transformation_indicator.get('landCover', {}).get("@id"),
             'indicator-id': transformation_indicator.get('term', {}).get('@id', ''),
-            'indicator-is-valid': _is_valid_indicator(transformation_indicator),
             'good-land-cover-term': all([bool(transformation_indicator.get('landCover')),
                                          bool(transformation_indicator.get('previousLandCover'))]),
             'country-id': get_country_id(impact_assessment),
-            'area-is-valid': _node_value(transformation_indicator) is not None and _node_value(
-                transformation_indicator) > 0,
+            'value-is-valid': (_node_value(transformation_indicator) is not None and
+                               _node_value(transformation_indicator) >= 0),
             'lookup-country': fallback_country(get_country_id(impact_assessment),
                                                [download_lookup(from_lookup_file), download_lookup(to_lookup_file)]),
         } for transformation_indicator in resource_uses
@@ -130,8 +134,7 @@ def _should_run(impact_assessment: dict) -> Tuple[bool, list]:
 
     valid_transformations_with_coef = [
         t for t in found_transformations_with_coefficient if all([
-            t['area-is-valid'],
-            t['indicator-is-valid'],
+            t['value-is-valid'],
             t['factor-from'] is not None,
             t['factor-to'] is not None
         ])
@@ -144,19 +147,24 @@ def _should_run(impact_assessment: dict) -> Tuple[bool, list]:
 
     all_transformations_are_valid = all(
         [
-            all([t['area-is-valid'], t['indicator-is-valid'], t['good-land-cover-term']])
+            all([t['value-is-valid'], t['good-land-cover-term']])
             for t in found_transformations_with_coefficient
         ]
     ) if found_transformations_with_coefficient else False
 
+    has_a_during_cycle_indicator = bool(find_term_match(resource_uses, "landTransformation20YearAverageDuringCycle"))
+
     logRequirements(impact_assessment, model=MODEL, term=TERM_ID,
-                    has_land_occupation_indicators=has_land_transformation_indicators,
+                    has_land_transformation_indicators=has_land_transformation_indicators,
+                    has_a_during_cycle_indicator=has_a_during_cycle_indicator,
                     all_transformations_are_valid=all_transformations_are_valid,
                     has_valid_transformations_with_coef=bool(valid_transformations_with_coef),
                     found_transformations=log_as_table(found_transformations_with_coefficient)
                     )
 
-    should_run = all([has_land_transformation_indicators, all_transformations_are_valid])
+    should_run = all([has_land_transformation_indicators,
+                      has_a_during_cycle_indicator,
+                      all_transformations_are_valid])
 
     logShouldRun(impact_assessment, MODEL, TERM_ID, should_run)
     return should_run, valid_transformations_with_coef
@@ -42,34 +42,36 @@ def _run(indicators: list):
 
 
 def _should_run(impactassessment: dict) -> tuple[bool, list]:
-    land_indicators = [
+    soil_quality_indicators = [
         i for i in impactassessment.get('emissionsResourceUse', []) if
         i.get('term', {}).get('@id', '') in ['soilQualityIndexLandOccupation', 'soilQualityIndexLandTransformation']
     ]
-    has_indicators = bool(land_indicators)
+    has_soil_quality_indicators = bool(soil_quality_indicators)
 
-    land_occupation_indicator = find_term_match(land_indicators, "soilQualityIndexLandOccupation",
-                                                default_val=None)
-    has_land_occupation_indicator = bool(land_occupation_indicator)
+    soil_quality_occupation_indicator = find_term_match(soil_quality_indicators, "soilQualityIndexLandOccupation",
+                                                        default_val=None)
+    has_soil_quality_land_occupation_indicator = bool(soil_quality_occupation_indicator)
 
-    land_transformation_indicator = find_term_match(land_indicators, "soilQualityIndexLandTransformation",
-                                                    default_val=None)
-    has_land_transformation_indicator = bool(land_transformation_indicator)
+    soil_quality_transformation_indicator = find_term_match(soil_quality_indicators,
+                                                            "soilQualityIndexLandTransformation",
+                                                            default_val=None)
+    has_soil_quality_land_transformation_indicator = bool(soil_quality_transformation_indicator)
 
-    has_valid_values = all([isinstance(indicator.get('value', None), (int, float)) for indicator in land_indicators])
+    has_valid_values = all(
+        [isinstance(indicator.get('value', None), (int, float)) for indicator in soil_quality_indicators])
 
     logRequirements(impactassessment, model=MODEL, term=TERM_ID,
-                    has_indicators=has_indicators,
-                    has_land_occupation_indicator=has_land_occupation_indicator,
-                    has_land_transformation_indicator=has_land_transformation_indicator,
+                    has_soil_quality_indicators=has_soil_quality_indicators,
+                    has_soil_quality_land_occupation_indicator=has_soil_quality_land_occupation_indicator,
+                    has_soil_quality_land_transformation_indicator=has_soil_quality_land_transformation_indicator,
                     has_valid_values=has_valid_values
                     )
 
-    should_run = all([has_indicators, has_valid_values,
-                      has_land_occupation_indicator, has_land_transformation_indicator])
+    should_run = all([has_soil_quality_indicators, has_valid_values,
+                      has_soil_quality_land_occupation_indicator, has_soil_quality_land_transformation_indicator])
 
     logShouldRun(impactassessment, MODEL, TERM_ID, should_run)
-    return should_run, land_indicators
+    return should_run, soil_quality_indicators
 
 
 def run(impactassessment: dict):
@@ -4,7 +4,7 @@ contained within the [ImpactAssesment.cycle](https://hestia.earth/schema/ImpactA
 100 years.
 
 It does this by multiplying the land occupation during the cycle by the
-[Site](https://www-staging.hestia.earth/schema/Site) area 100 years ago and dividing by 100.
+[Site](https://hestia.earth/schema/Site) area 100 years ago and dividing by 100.
 
 Land transformation from [land type] 100 years =
 (Land occupation, during Cycle * Site Percentage Area 100 years ago [land type] / 100) / 100
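A worked instance of the docstring formula above, with invented numbers (the 20-year model in the next hunk uses the same shape with 20 in the final divisor):

```python
# Land transformation from [land type], 100-year average, illustrative values.
land_occupation_during_cycle = 5000       # land occupation during the Cycle
site_percentage_area_100_years_ago = 40   # % of the site under [land type]

land_transformation_100_years = (
    land_occupation_during_cycle * site_percentage_area_100_years_ago / 100
) / 100
print(land_transformation_100_years)  # 20.0
```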
@@ -4,7 +4,7 @@ contained within the [ImpactAssesment.cycle](https://hestia.earth/schema/ImpactA
 20 years.
 
 It does this by multiplying the land occupation during the cycle by the
-[Site](https://www-staging.hestia.earth/schema/Site) area 20 years ago and dividing by 20.
+[Site](https://hestia.earth/schema/Site) area 20 years ago and dividing by 20.
 
 Land transformation from [land type] 20 years =
 (Land occupation, during Cycle * Site Percentage Area 20 years ago [land type] / 100) / 20
@@ -25,7 +25,7 @@ MODEL_KEY = 'value'
 
 
 def _run(impact: dict, product: dict):
-    return {**impact.get('product'), MODEL_KEY: product.get(MODEL_KEY, [])}
+    return impact.get('product') | {MODEL_KEY: product.get(MODEL_KEY, [])}
 
 
 def _should_run(impact: dict):
@@ -170,7 +170,7 @@ def _should_run(site: dict) -> tuple[bool, dict, dict]:
     inventory = _compile_inventory(land_cover) if should_compile_inventory else {}
     kwargs = {
         "eco_climate_zone": eco_climate_zone,
-        "seed": gen_seed(site)
+        "seed": gen_seed(site, MODEL, TERM_ID)
     }
 
     logRequirements(
@@ -542,7 +542,7 @@ def _measurement(
     max : list[float]
         A list of maximum values representing the maximum modelled biomass stock for each year of the inventory.
     statsDefinition : str
-        The [statsDefinition](https://www-staging.hestia.earth/schema/Measurement#statsDefinition) of the measurement.
+        The [statsDefinition](https://hestia.earth/schema/Measurement#statsDefinition) of the measurement.
     observations : list[int]
         The number of model iterations used to calculate the descriptive statistics.