hestia-earth-models 0.65.10__py3-none-any.whl → 0.66.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59) hide show
  1. hestia_earth/models/cache_sites.py +7 -9
  2. hestia_earth/models/config/Cycle.json +34 -16
  3. hestia_earth/models/config/ImpactAssessment.json +12 -0
  4. hestia_earth/models/config/Site.json +4 -1
  5. hestia_earth/models/cycle/completeness/freshForage.py +10 -2
  6. hestia_earth/models/cycle/cropResidueManagement.py +3 -1
  7. hestia_earth/models/ecoinventV3/__init__.py +2 -1
  8. hestia_earth/models/environmentalFootprintV3/environmentalFootprintSingleOverallScore.py +135 -0
  9. hestia_earth/models/environmentalFootprintV3/soilQualityIndexLandTransformation.py +17 -6
  10. hestia_earth/models/geospatialDatabase/{aware.py → awareWaterBasinId.py} +1 -1
  11. hestia_earth/models/hestia/landCover.py +57 -39
  12. hestia_earth/models/hestia/residueRemoved.py +80 -0
  13. hestia_earth/models/hestia/resourceUse_utils.py +64 -38
  14. hestia_earth/models/hestia/utils.py +1 -2
  15. hestia_earth/models/ipcc2019/aboveGroundBiomass.py +33 -12
  16. hestia_earth/models/ipcc2019/animal/pastureGrass.py +1 -1
  17. hestia_earth/models/ipcc2019/belowGroundBiomass.py +32 -11
  18. hestia_earth/models/ipcc2019/ch4ToAirEntericFermentation.py +17 -8
  19. hestia_earth/models/ipcc2019/co2ToAirCarbonStockChange_utils.py +5 -3
  20. hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_2_utils.py +27 -17
  21. hestia_earth/models/ipcc2019/pastureGrass.py +1 -1
  22. hestia_earth/models/ipcc2019/pastureGrass_utils.py +8 -1
  23. hestia_earth/models/log.py +1 -1
  24. hestia_earth/models/mocking/search-results.json +34 -34
  25. hestia_earth/models/pooreNemecek2018/freshwaterWithdrawalsDuringCycle.py +0 -1
  26. hestia_earth/models/pooreNemecek2018/landOccupationDuringCycle.py +13 -10
  27. hestia_earth/models/site/defaultMethodClassification.py +9 -2
  28. hestia_earth/models/site/defaultMethodClassificationDescription.py +4 -2
  29. hestia_earth/models/site/management.py +49 -31
  30. hestia_earth/models/site/pre_checks/cache_geospatialDatabase.py +19 -14
  31. hestia_earth/models/utils/blank_node.py +10 -4
  32. hestia_earth/models/utils/crop.py +1 -1
  33. hestia_earth/models/utils/cycle.py +3 -3
  34. hestia_earth/models/utils/lookup.py +1 -1
  35. hestia_earth/models/version.py +1 -1
  36. hestia_earth/orchestrator/strategies/merge/merge_list.py +17 -6
  37. {hestia_earth_models-0.65.10.dist-info → hestia_earth_models-0.66.0.dist-info}/METADATA +1 -1
  38. {hestia_earth_models-0.65.10.dist-info → hestia_earth_models-0.66.0.dist-info}/RECORD +59 -54
  39. tests/models/environmentalFootprintV3/test_environmentalFootprintSingleOverallScore.py +92 -0
  40. tests/models/environmentalFootprintV3/test_soilQualityIndexLandTransformation.py +4 -19
  41. tests/models/faostat2018/product/test_price.py +1 -1
  42. tests/models/geospatialDatabase/{test_aware.py → test_awareWaterBasinId.py} +1 -1
  43. tests/models/hestia/test_landCover.py +4 -2
  44. tests/models/hestia/test_landTransformation20YearAverageDuringCycle.py +3 -1
  45. tests/models/hestia/test_residueRemoved.py +20 -0
  46. tests/models/ipcc2019/test_aboveGroundBiomass.py +3 -1
  47. tests/models/ipcc2019/test_belowGroundBiomass.py +4 -2
  48. tests/models/ipcc2019/test_organicCarbonPerHa.py +94 -1
  49. tests/models/pooreNemecek2018/test_landOccupationDuringCycle.py +1 -3
  50. tests/models/site/pre_checks/test_cache_geospatialDatabase.py +22 -0
  51. tests/models/site/test_defaultMethodClassification.py +6 -0
  52. tests/models/site/test_defaultMethodClassificationDescription.py +6 -0
  53. tests/models/site/test_management.py +4 -4
  54. tests/models/test_cache_sites.py +2 -2
  55. tests/models/utils/test_crop.py +14 -2
  56. tests/orchestrator/strategies/merge/test_merge_list.py +11 -1
  57. {hestia_earth_models-0.65.10.dist-info → hestia_earth_models-0.66.0.dist-info}/LICENSE +0 -0
  58. {hestia_earth_models-0.65.10.dist-info → hestia_earth_models-0.66.0.dist-info}/WHEEL +0 -0
  59. {hestia_earth_models-0.65.10.dist-info → hestia_earth_models-0.66.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,80 @@
1
+ """
2
+ This model will gap-fill the value for `residueRemoved` when the only provided data is the incorporated residue.
3
+ We are assuming that anything that was not incorporated must have been removed.
4
+ """
5
+ from hestia_earth.schema import TermTermType
6
+ from hestia_earth.utils.model import filter_list_term_type
7
+ from hestia_earth.utils.tools import list_sum
8
+
9
+ from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
10
+ from hestia_earth.models.utils.completeness import _is_term_type_incomplete
11
+ from hestia_earth.models.utils.practice import _new_practice
12
+ from hestia_earth.models.utils import is_from_model
13
+ from . import MODEL
14
+
15
# Model requirements: the Cycle must have incomplete crop-residue completeness,
# at least one `residueIncorporated*` practice, and none of the other residue
# destination practices (removed / burnt / left on field) already recorded.
REQUIREMENTS = {
    "Cycle": {
        "completeness.cropResidue": "False",
        "practices": [{
            "@type": "Practice",
            "term.@id": [
                "residueIncorporated",
                "residueIncorporatedLessThan30DaysBeforeCultivation",
                "residueIncorporatedMoreThan30DaysBeforeCultivation"
            ]
        }],
        "none": {
            "practices": [{
                "@type": "Practice",
                "term.@id": [
                    "residueRemoved",
                    "residueBurnt",
                    "residueLeftOnField"
                ]
            }]
        }
    }
}
# Shape of the nodes returned by `run`: a single Practice with a value.
RETURNS = {
    "Practice": [{
        "value": ""
    }]
}
# Term gap-filled by this model.
TERM_ID = 'residueRemoved'
44
+
45
+
46
def _practice(value: float):
    """Return a new `residueRemoved` Practice node holding `value` as its only entry."""
    node = _new_practice(TERM_ID, MODEL)
    node['value'] = [value]
    return node
50
+
51
+
52
def _should_run(cycle: dict):
    """
    Decide whether the model can run on this Cycle and compute the gap-filled value.

    The model runs when: crop-residue completeness is `False`, only
    `residueIncorporated*` practices (not added by another model) are present among
    the crop residue management practices, and their summed value is known.
    Anything not incorporated is assumed to have been removed.

    Returns
    -------
    tuple[bool, float]
        `(should_run, value)` where `value` is `100 - incorporated` (percent).
    """
    crop_residue_incomplete = _is_term_type_incomplete(cycle, TermTermType.CROPRESIDUE)

    practices = filter_list_term_type(cycle.get('practices', []), TermTermType.CROPRESIDUEMANAGEMENT)
    incorporated_practices = [
        {'id': p.get('term', {}).get('@id'), 'value': list_sum(p.get('value'), None)}
        for p in practices
        # `or ''` guards against a missing `@id`, which would raise on `startswith`
        if (p.get('term', {}).get('@id') or '').startswith('residueIncorporated') and not is_from_model(p)
    ]
    # any other residue-management practice means the remainder is already accounted for
    has_other_practices = any(
        not (p.get('term', {}).get('@id') or '').startswith('residueIncorporated')
        for p in practices
    )
    incorporated_value = list_sum([p.get('value') for p in incorporated_practices], None)

    logRequirements(cycle, model=MODEL, term=TERM_ID,
                    term_type_cropResidue_incomplete=crop_residue_incomplete,
                    incorporated_practices=log_as_table(incorporated_practices),
                    incorporated_value=incorporated_value,
                    has_other_practices=has_other_practices)

    should_run = all([crop_residue_incomplete, incorporated_value, not has_other_practices])
    logShouldRun(cycle, MODEL, TERM_ID, should_run)
    return should_run, 100 - (incorporated_value or 0)
76
+
77
+
78
def run(cycle: dict):
    """Gap-fill `residueRemoved` as 100% minus the incorporated residue share."""
    can_run, remainder = _should_run(cycle)
    return [_practice(remainder)] if can_run else []
@@ -8,7 +8,7 @@ from dateutil.relativedelta import relativedelta
8
8
  from hestia_earth.schema import TermTermType
9
9
  from hestia_earth.utils.tools import to_precision
10
10
 
11
- from hestia_earth.models.log import logRequirements, logShouldRun
11
+ from hestia_earth.models.log import logRequirements, logShouldRun, debugValues
12
12
  from hestia_earth.models.utils.blank_node import _gapfill_datestr, DatestrGapfillMode, DatestrFormat, _str_dates_match
13
13
  from hestia_earth.models.utils.impact_assessment import get_site
14
14
  from hestia_earth.models.utils.indicator import _new_indicator
@@ -46,26 +46,25 @@ def _gap_filled_date_obj(date_str: str) -> datetime:
46
46
  )
47
47
 
48
48
 
49
- def _should_run_close_date_found(
50
- ia_end_date_str: str,
49
+ def _find_closest_node(
50
+ ia_date_str: str,
51
51
  management_nodes: list,
52
- historic_date_offset: int
53
- ) -> tuple[bool, str]:
52
+ historic_date_offset: int,
53
+ node_date_field: str
54
+ ) -> str:
54
55
  historic_ia_date_obj = (
55
- _gap_filled_date_obj(ia_end_date_str) - relativedelta(years=historic_date_offset)
56
- if ia_end_date_str else None
56
+ _gap_filled_date_obj(ia_date_str) - relativedelta(years=historic_date_offset)
57
+ if ia_date_str else None
57
58
  )
58
59
  # Calculate all distances in days which are less than MAXIMUM_OFFSET_DAYS from historic date
59
- # Assumption: if there are two dates are equidistant from the target chose the second.
60
+ # Assumption: if two dates are equidistant from the target, choose the second.
60
61
  filtered_dates = {
61
- abs((_gap_filled_date_obj(node.get("endDate")) - historic_ia_date_obj).days): node.get("endDate")
62
+ abs((_gap_filled_date_obj(node.get(node_date_field)) - historic_ia_date_obj).days): node.get(node_date_field)
62
63
  for node in management_nodes
63
64
  if node.get("term", {}).get("termType", "") == TermTermType.LANDCOVER.value and
64
- abs((_gap_filled_date_obj(node.get("endDate")) - historic_ia_date_obj).days) <= _MAXIMUM_OFFSET_DAYS
65
+ abs((_gap_filled_date_obj(node.get(node_date_field)) - historic_ia_date_obj).days) <= _MAXIMUM_OFFSET_DAYS
65
66
  }
66
- nearest_date = filtered_dates[min(filtered_dates.keys())] if filtered_dates else ""
67
-
68
- return nearest_date != "", nearest_date
67
+ return filtered_dates[min(filtered_dates.keys())] if filtered_dates else ""
69
68
 
70
69
 
71
70
  def should_run(
@@ -73,44 +72,59 @@ def should_run(
73
72
  site: dict,
74
73
  term_id: str,
75
74
  historic_date_offset: int
76
- ) -> tuple[bool, dict, str]:
75
+ ) -> tuple[bool, dict, str, str]:
77
76
  relevant_emission_resource_use = [
78
77
  node for node in impact_assessment.get("emissionsResourceUse", [])
79
78
  if node.get("term", {}).get("@id", "") == _RESOURCE_USE_TERM_ID and node.get("value", -1) >= 0
80
79
  ]
81
-
82
80
  filtered_management_nodes = [
83
81
  node for node in site.get("management", [])
84
82
  if node.get("value", -1) >= 0 and node.get("term", {}).get("termType", "") == TermTermType.LANDCOVER.value
85
83
  ]
84
+ land_occupation_during_cycle_found = any(
85
+ node.get("term", {}).get("@id") in
86
+ {node.get("landCover", {}).get("@id") for node in relevant_emission_resource_use}
87
+ for node in filtered_management_nodes
88
+ )
89
+ match_mode = (
90
+ DatestrGapfillMode.START if impact_assessment.get("cycle", {}).get("aggregated") is True
91
+ else DatestrGapfillMode.END
92
+ )
93
+ match_date = "startDate" if match_mode == DatestrGapfillMode.START else "endDate"
94
+
95
+ closest_date = _find_closest_node(
96
+ ia_date_str=impact_assessment.get(match_date, ""),
97
+ management_nodes=filtered_management_nodes,
98
+ historic_date_offset=historic_date_offset,
99
+ node_date_field=match_date
100
+ )
101
+ closest_start_date, closest_end_date = (closest_date, None) if match_date == "startDate" else (None, closest_date)
86
102
  current_node_index = next(
87
103
  (i for i, node in enumerate(filtered_management_nodes)
88
- if _str_dates_match(node.get("endDate", ""), impact_assessment.get("endDate", ""))),
104
+ if _str_dates_match(
105
+ date_str_one=node.get(match_date, ""),
106
+ date_str_two=impact_assessment.get(match_date, ""),
107
+ mode=match_mode
108
+ )),
89
109
  None
90
110
  )
91
111
  current_node = filtered_management_nodes.pop(current_node_index) if current_node_index is not None else None
92
112
 
93
- close_date_found, closest_date_str = _should_run_close_date_found(
94
- ia_end_date_str=impact_assessment.get("endDate", ""),
95
- management_nodes=filtered_management_nodes,
96
- historic_date_offset=historic_date_offset
97
- )
98
-
99
- logRequirements(
100
- log_node=impact_assessment,
101
- model=MODEL,
102
- term_id=term_id,
103
- site=site
104
- )
113
+ logRequirements(impact_assessment, model=MODEL, term=term_id,
114
+ closest_end_date=closest_end_date,
115
+ closest_start_date=closest_start_date,
116
+ has_landOccupationDuringCycle=land_occupation_during_cycle_found,
117
+ landCover_term_id=(current_node or {}).get('term', {}).get('@id'))
105
118
 
106
119
  should_run_result = all([
107
- relevant_emission_resource_use != [],
120
+ relevant_emission_resource_use,
121
+ land_occupation_during_cycle_found,
108
122
  current_node,
109
- close_date_found
123
+ closest_end_date or closest_start_date
110
124
  ])
111
- logShouldRun(site, MODEL, term=term_id, should_run=should_run_result)
125
+ logShouldRun(impact_assessment, MODEL, term=term_id, should_run=should_run_result)
112
126
 
113
- return should_run_result, current_node, closest_date_str
127
+ return should_run_result, current_node, closest_end_date, closest_start_date
114
128
 
115
129
 
116
130
  def _get_land_occupation_for_land_use_type(impact_assessment: dict, ipcc_land_use_category: str) -> float:
@@ -126,6 +140,7 @@ def _get_land_occupation_for_land_use_type(impact_assessment: dict, ipcc_land_us
126
140
 
127
141
  def _calculate_indicator_value(
128
142
  impact_assessment: dict,
143
+ term_id: str,
129
144
  management_nodes: list,
130
145
  ipcc_land_use_category: str,
131
146
  previous_land_cover_id: str,
@@ -143,13 +158,21 @@ def _calculate_indicator_value(
143
158
  node.get("value", 0) for node in management_nodes
144
159
  if node.get("term", {}).get("@id", "") == previous_land_cover_id
145
160
  )
161
+
162
+ debugValues(impact_assessment, model=MODEL, term=term_id,
163
+ ipcc_land_use_category=ipcc_land_use_category,
164
+ land_occupation_for_cycle=land_occupation_for_cycle,
165
+ historical_land_use=historical_land_use,
166
+ historic_date_offset=historic_date_offset)
167
+
146
168
  return ((land_occupation_for_cycle * historical_land_use) / 100) / historic_date_offset
147
169
 
148
170
 
149
171
  def _run_calculate_transformation(
150
172
  term_id: str,
151
173
  current_node: dict,
152
- closest_date_str: str,
174
+ closest_end_date: str,
175
+ closest_start_date: str,
153
176
  impact_assessment: dict,
154
177
  site: dict,
155
178
  historic_date_offset: int
@@ -164,9 +187,11 @@ def _run_calculate_transformation(
164
187
  previous_land_cover_id=previous_land_cover_id,
165
188
  value=_calculate_indicator_value(
166
189
  impact_assessment=impact_assessment,
190
+ term_id=term_id,
167
191
  management_nodes=[
168
192
  node for node in site.get("management", [])
169
- if _str_dates_match(node.get("endDate", ""), closest_date_str)
193
+ if _str_dates_match(node.get("endDate", ""), closest_end_date) or
194
+ _str_dates_match(node.get("startDate", ""), closest_start_date)
170
195
  ],
171
196
  ipcc_land_use_category=crop_ipcc_land_use_category(current_node.get("term", {}).get("@id", "")),
172
197
  previous_land_cover_id=previous_land_cover_id,
@@ -184,17 +209,18 @@ def run_resource_use(
184
209
  term_id: str
185
210
  ) -> list:
186
211
  site = get_site(impact_assessment)
187
- _should_run, current_node, closest_date_str = should_run(
212
+ _should_run, current_node, closest_end_date, closest_start_date = should_run(
188
213
  impact_assessment=impact_assessment,
189
214
  site=site,
190
215
  term_id=term_id,
191
216
  historic_date_offset=historic_date_offset
192
217
  )
193
218
  return _run_calculate_transformation(
219
+ impact_assessment=impact_assessment,
220
+ site=site,
194
221
  term_id=term_id,
195
222
  current_node=current_node,
196
- closest_date_str=closest_date_str,
197
- site=site,
198
- impact_assessment=impact_assessment,
223
+ closest_end_date=closest_end_date,
224
+ closest_start_date=closest_start_date,
199
225
  historic_date_offset=historic_date_offset
200
226
  ) if _should_run else []
@@ -43,6 +43,5 @@ def crop_ipcc_land_use_category(
43
43
  return get_lookup_value(
44
44
  lookup_term={"@id": crop_term_id, "type": "Term", "termType": lookup_term_type},
45
45
  column='IPCC_LAND_USE_CATEGORY',
46
- model=MODEL,
47
- term={"@id": crop_term_id, "type": "Term", "termType": lookup_term_type}
46
+ model=MODEL
48
47
  )
@@ -229,6 +229,7 @@ def _compile_inventory(land_cover_nodes: list[dict]) -> dict:
229
229
  The inventory of data.
230
230
  """
231
231
  land_cover_grouped = group_nodes_by_year(land_cover_nodes)
232
+ min_year, max_year = min(land_cover_grouped.keys()), max(land_cover_grouped.keys())
232
233
 
233
234
  def build_inventory_year(inventory: dict, year_pair: tuple[int, int]) -> dict:
234
235
  """
@@ -262,15 +263,33 @@ def _compile_inventory(land_cover_nodes: list[dict]) -> dict:
262
263
  years_since_lcc_event = time_delta if is_lcc_event else prev_years_since_lcc_event + time_delta
263
264
  regime_start_year = current_year - years_since_lcc_event
264
265
 
266
+ equilibrium_year = regime_start_year + _EQUILIBRIUM_TRANSITION_PERIOD
267
+ inventory_years = set(list(inventory.keys()) + list(land_cover_grouped.keys()))
268
+
269
+ should_add_equilibrium_year = (
270
+ min_year < equilibrium_year < max_year # Is the year relevant?
271
+ and equilibrium_year not in inventory_years # Is the year missing?
272
+ and equilibrium_year < current_year # Is it the first inventory year after the equilibrium?
273
+ )
274
+
275
+ current_data = {
276
+ _InventoryKey.BIOMASS_CATEGORY_SUMMARY: biomass_category_summary,
277
+ _InventoryKey.LAND_COVER_SUMMARY: land_cover_summary,
278
+ _InventoryKey.LAND_COVER_CHANGE_EVENT: is_lcc_event,
279
+ _InventoryKey.YEARS_SINCE_LCC_EVENT: years_since_lcc_event,
280
+ _InventoryKey.REGIME_START_YEAR: regime_start_year
281
+ }
282
+
283
+ equilibrium_data = {
284
+ **current_data,
285
+ _InventoryKey.YEARS_SINCE_LCC_EVENT: _EQUILIBRIUM_TRANSITION_PERIOD
286
+ }
287
+
265
288
  update_dict = {
266
- current_year: {
267
- _InventoryKey.BIOMASS_CATEGORY_SUMMARY: biomass_category_summary,
268
- _InventoryKey.LAND_COVER_SUMMARY: land_cover_summary,
269
- _InventoryKey.LAND_COVER_CHANGE_EVENT: is_lcc_event,
270
- _InventoryKey.YEARS_SINCE_LCC_EVENT: years_since_lcc_event,
271
- _InventoryKey.REGIME_START_YEAR: regime_start_year
272
- }
289
+ current_year: current_data,
290
+ **({equilibrium_year: equilibrium_data} if should_add_equilibrium_year else {})
273
291
  }
292
+
274
293
  return inventory | update_dict
275
294
 
276
295
  start_year = list(land_cover_grouped)[0]
@@ -290,11 +309,13 @@ def _compile_inventory(land_cover_nodes: list[dict]) -> dict:
290
309
  }
291
310
  }
292
311
 
293
- return reduce(
294
- build_inventory_year,
295
- pairwise(land_cover_grouped.keys()), # Inventory years need data from previous year to be compiled.
296
- initial
297
- )
312
+ return dict(sorted(
313
+ reduce(
314
+ build_inventory_year,
315
+ pairwise(land_cover_grouped.keys()), # Inventory years need data from previous year to be compiled.
316
+ initial
317
+ ).items()
318
+ ))
298
319
 
299
320
 
300
321
  def _format_inventory(inventory: dict) -> str:
@@ -233,7 +233,7 @@ def _run_practice(animal: dict, values: dict, meanDE: float, meanECHHV: float, R
233
233
  'weightAtOneYear',
234
234
  'weightAtSlaughter'
235
235
  ])
236
- has_positive_feed_values = all([NEm_feed > 0, NEg_feed > 0])
236
+ has_positive_feed_values = all([NEm_feed >= 0, NEg_feed >= 0])
237
237
 
238
238
  logRequirements(animal, model=MODEL, term=input_term_id, model_key=MODEL_KEY,
239
239
  feed_logs=log_as_table(log_feed),
@@ -222,6 +222,7 @@ def _compile_inventory(land_cover_nodes: list[dict]) -> dict:
222
222
  The inventory of data.
223
223
  """
224
224
  land_cover_grouped = group_nodes_by_year(land_cover_nodes)
225
+ min_year, max_year = min(land_cover_grouped.keys()), max(land_cover_grouped.keys())
225
226
 
226
227
  def build_inventory_year(inventory: dict, year_pair: tuple[int, int]) -> dict:
227
228
  """
@@ -253,14 +254,32 @@ def _compile_inventory(land_cover_nodes: list[dict]) -> dict:
253
254
  years_since_lcc_event = time_delta if is_lcc_event else prev_years_since_lcc_event + time_delta
254
255
  regime_start_year = current_year - years_since_lcc_event
255
256
 
257
+ equilibrium_year = regime_start_year + _EQUILIBRIUM_TRANSITION_PERIOD
258
+ inventory_years = set(list(inventory.keys()) + list(land_cover_grouped.keys()))
259
+
260
+ should_add_equilibrium_year = (
261
+ min_year < equilibrium_year < max_year # Is the year relevant?
262
+ and equilibrium_year not in inventory_years # Is the year missing?
263
+ and equilibrium_year < current_year # Is it the first inventory year after the equilibrium?
264
+ )
265
+
266
+ current_data = {
267
+ _InventoryKey.BIOMASS_CATEGORY_SUMMARY: biomass_category_summary,
268
+ _InventoryKey.LAND_COVER_CHANGE_EVENT: is_lcc_event,
269
+ _InventoryKey.YEARS_SINCE_LCC_EVENT: years_since_lcc_event,
270
+ _InventoryKey.REGIME_START_YEAR: regime_start_year
271
+ }
272
+
273
+ equilibrium_data = {
274
+ **current_data,
275
+ _InventoryKey.YEARS_SINCE_LCC_EVENT: _EQUILIBRIUM_TRANSITION_PERIOD
276
+ }
277
+
256
278
  update_dict = {
257
- current_year: {
258
- _InventoryKey.BIOMASS_CATEGORY_SUMMARY: biomass_category_summary,
259
- _InventoryKey.LAND_COVER_CHANGE_EVENT: is_lcc_event,
260
- _InventoryKey.YEARS_SINCE_LCC_EVENT: years_since_lcc_event,
261
- _InventoryKey.REGIME_START_YEAR: regime_start_year
262
- }
279
+ current_year: current_data,
280
+ **({equilibrium_year: equilibrium_data} if should_add_equilibrium_year else {})
263
281
  }
282
+
264
283
  return inventory | update_dict
265
284
 
266
285
  start_year = list(land_cover_grouped)[0]
@@ -277,11 +296,13 @@ def _compile_inventory(land_cover_nodes: list[dict]) -> dict:
277
296
  }
278
297
  }
279
298
 
280
- return reduce(
281
- build_inventory_year,
282
- pairwise(land_cover_grouped.keys()), # Inventory years need data from previous year to be compiled.
283
- initial
284
- )
299
+ return dict(sorted(
300
+ reduce(
301
+ build_inventory_year,
302
+ pairwise(land_cover_grouped.keys()), # Inventory years need data from previous year to be compiled.
303
+ initial
304
+ ).items()
305
+ ))
285
306
 
286
307
 
287
308
  def _format_inventory(inventory: dict) -> str:
@@ -237,24 +237,33 @@ def _should_run(cycle: dict):
237
237
 
238
238
  # only keep inputs that have a positive value
239
239
  inputs = list(filter(lambda i: list_sum(i.get('value', [])) > 0, feed_inputs))
240
- DE = (
241
- get_total_value_converted_with_min_ratio(MODEL, TERM_ID, cycle, inputs, DE_type) if DE_type else None
242
- ) or get_default_digestibility(MODEL, TERM_ID, cycle)
243
- NDF = get_total_value_converted_with_min_ratio(MODEL, TERM_ID, cycle, inputs, 'neutralDetergentFibreContent')
240
+ DE = get_total_value_converted_with_min_ratio(
241
+ MODEL, TERM_ID, cycle, inputs, prop_id=DE_type, is_sum=False
242
+ ) if DE_type else None
243
+ # set as a percentage in the properties
244
+ DE = DE * 100 if DE else DE
245
+ DE_default = get_default_digestibility(MODEL, TERM_ID, cycle)
246
+
247
+ # set as a percentage in the properties
248
+ NDF = get_total_value_converted_with_min_ratio(
249
+ MODEL, TERM_ID, cycle, inputs, prop_id='neutralDetergentFibreContent', is_sum=False
250
+ )
251
+ NDF = NDF * 100 if NDF else NDF
244
252
 
245
253
  enteric_factor = safe_parse_float(_get_lookup_value(
246
- lookup, term, LOOKUPS['liveAnimal'][1], DE, NDF, ionophore, milk_yield
254
+ lookup, term, LOOKUPS['liveAnimal'][1], DE or DE_default, NDF, ionophore, milk_yield
247
255
  ), None)
248
256
  enteric_sd = safe_parse_float(_get_lookup_value(
249
- lookup, term, LOOKUPS['liveAnimal'][2], DE, NDF, ionophore, milk_yield
257
+ lookup, term, LOOKUPS['liveAnimal'][2], DE or DE_default, NDF, ionophore, milk_yield
250
258
  ), None)
251
259
 
252
260
  default_values = _get_default_values(lookup, term)
253
261
 
254
262
  debugValues(cycle, model=MODEL, term=TERM_ID,
255
263
  DE_type=DE_type,
256
- digestibility=DE,
257
- ndf=NDF,
264
+ DE=DE,
265
+ **({'DE_default_lookup': DE_default} if not DE else {}),
266
+ NDF=NDF,
258
267
  ionophore=ionophore,
259
268
  milk_yield=milk_yield,
260
269
  enteric_factor=enteric_factor,
@@ -1315,7 +1315,8 @@ def _format_land_use_inventory(land_use_inventory: dict) -> str:
1315
1315
  """
1316
1316
  KEYS = [
1317
1317
  _InventoryKey.LAND_USE_CHANGE_EVENT,
1318
- _InventoryKey.YEARS_SINCE_LUC_EVENT
1318
+ _InventoryKey.YEARS_SINCE_LUC_EVENT,
1319
+ _InventoryKey.YEARS_SINCE_INVENTORY_START
1319
1320
  ]
1320
1321
 
1321
1322
  inventory_years = sorted(set(non_empty_list(years for years in land_use_inventory.keys())))
@@ -1374,7 +1375,8 @@ def _format_named_tuple(value: Optional[Union[CarbonStock, CarbonStockChange, Ca
1374
1375
 
1375
1376
  _LAND_USE_INVENTORY_KEY_TO_FORMAT_FUNC = {
1376
1377
  _InventoryKey.LAND_USE_CHANGE_EVENT: _format_bool,
1377
- _InventoryKey.YEARS_SINCE_LUC_EVENT: _format_int
1378
+ _InventoryKey.YEARS_SINCE_LUC_EVENT: _format_int,
1379
+ _InventoryKey.YEARS_SINCE_INVENTORY_START: _format_int
1378
1380
  }
1379
1381
  """
1380
1382
  Map inventory keys to format functions. The columns in inventory logged as a table will also be sorted in the order of
@@ -1416,7 +1418,7 @@ def create_run_function(
1416
1418
  years_since_inventory_start = data[_InventoryKey.YEARS_SINCE_INVENTORY_START]
1417
1419
 
1418
1420
  is_luc_emission = bool(years_since_luc_event) and years_since_luc_event <= _TRANSITION_PERIOD_YEARS
1419
- is_data_complete = bool(years_since_inventory_start) and years_since_inventory_start > _TRANSITION_PERIOD_YEARS
1421
+ is_data_complete = bool(years_since_inventory_start) and years_since_inventory_start >= _TRANSITION_PERIOD_YEARS
1420
1422
 
1421
1423
  if is_luc_emission:
1422
1424
  # If LUC emission allocate emissions to land use change AND add corresponding zero emission to management
@@ -62,6 +62,7 @@ _ABOVE_GROUND_CROP_RESIDUE_TOTAL_TERM_ID = "aboveGroundCropResidueTotal"
62
62
  _CARBON_CONTENT_TERM_ID = "carbonContent"
63
63
  _NITROGEN_CONTENT_TERM_ID = "nitrogenContent"
64
64
  _LIGNIN_CONTENT_TERM_ID = "ligninContent"
65
+ _DRY_MATTER_TERM_ID = "dryMatter"
65
66
 
66
67
  _CROP_RESIDUE_MANAGEMENT_TERM_IDS = [
67
68
  "residueIncorporated",
@@ -82,7 +83,8 @@ _DEFAULT_COVER_CROP_BIOMASS = 4000 # TODO: Confirm assumption, Source PAS 2050-
82
83
  _CARBON_INPUT_PROPERTY_TERM_IDS = [
83
84
  _CARBON_CONTENT_TERM_ID,
84
85
  _NITROGEN_CONTENT_TERM_ID,
85
- _LIGNIN_CONTENT_TERM_ID
86
+ _LIGNIN_CONTENT_TERM_ID,
87
+ _DRY_MATTER_TERM_ID
86
88
  ]
87
89
 
88
90
  _CARBON_SOURCE_TERM_TYPES = [
@@ -1380,9 +1382,13 @@ def _calc_carbon_source_ag_crop_residue(node: dict, cycle: dict) -> Union[Carbon
1380
1382
  ])
1381
1383
  mass = value * max(residue_left_on_field, _MIN_RESIDUE_LEFT_ON_FIELD) / 100
1382
1384
 
1385
+ carbon_content, nitrogen_content, lignin_content, dry_matter = _retrieve_carbon_source_properties(node)
1386
+
1383
1387
  carbon_source = CarbonSource(
1384
- mass,
1385
- *_retrieve_carbon_source_properties(node)
1388
+ mass * dry_matter if dry_matter else mass,
1389
+ carbon_content / dry_matter if dry_matter else carbon_content,
1390
+ nitrogen_content / dry_matter if dry_matter else nitrogen_content,
1391
+ lignin_content / dry_matter if dry_matter else lignin_content
1386
1392
  )
1387
1393
 
1388
1394
  return carbon_source if _validate_carbon_source(carbon_source) else None
@@ -1390,7 +1396,7 @@ def _calc_carbon_source_ag_crop_residue(node: dict, cycle: dict) -> Union[Carbon
1390
1396
 
1391
1397
  def _should_run_carbon_source_cover_crop(node: dict) -> bool:
1392
1398
  """
1393
- Determine whether a product is a valid above cover crop carbon source.
1399
+ Determine whether a product is a valid cover crop carbon source.
1394
1400
 
1395
1401
  Parameters
1396
1402
  ----------
@@ -1404,13 +1410,13 @@ def _should_run_carbon_source_cover_crop(node: dict) -> bool:
1404
1410
  Whether the node satisfies the critera.
1405
1411
  """
1406
1412
  LOOKUP = _LOOKUPS["landCover"]
1407
- return all([
1408
- node.get("term", {}).get("termType") in [TermTermType.LANDCOVER.value],
1409
- node_lookup_match(
1410
- node, LOOKUP, IPCC_LAND_USE_CATEGORY_TO_LAND_COVER_LOOKUP_VALUE[IpccLandUseCategory.ANNUAL_CROPS]
1411
- ),
1412
- is_cover_crop(node)
1413
- ])
1413
+ TARGET_LOOKUP_VALUES = IPCC_LAND_USE_CATEGORY_TO_LAND_COVER_LOOKUP_VALUE[IpccLandUseCategory.ANNUAL_CROPS]
1414
+
1415
+ return (
1416
+ node.get("term", {}).get("termType") in [TermTermType.LANDCOVER.value]
1417
+ and is_cover_crop(node)
1418
+ and node_lookup_match(node, LOOKUP, TARGET_LOOKUP_VALUES)
1419
+ )
1414
1420
 
1415
1421
 
1416
1422
  def _calc_carbon_source_cover_crop(node: dict, *_) -> Union[CarbonSource, None]:
@@ -1475,15 +1481,20 @@ def _calc_carbon_source(node: dict, *_) -> Union[CarbonSource, None]:
1475
1481
  CarbonSource | None
1476
1482
  The carbon source data of the cover crop, or `None` if carbon source data incomplete.
1477
1483
  """
1484
+ mass = get_node_value(node)
1485
+ carbon_content, nitrogen_content, lignin_content, dry_matter = _retrieve_carbon_source_properties(node)
1486
+
1478
1487
  carbon_source = CarbonSource(
1479
- get_node_value(node),
1480
- *_retrieve_carbon_source_properties(node)
1488
+ mass * dry_matter if dry_matter else mass,
1489
+ carbon_content / dry_matter if dry_matter else carbon_content,
1490
+ nitrogen_content / dry_matter if dry_matter else nitrogen_content,
1491
+ lignin_content / dry_matter if dry_matter else lignin_content
1481
1492
  )
1482
1493
 
1483
1494
  return carbon_source if _validate_carbon_source(carbon_source) else None
1484
1495
 
1485
1496
 
1486
- def _retrieve_carbon_source_properties(node: dict) -> tuple[float, float, float]:
1497
+ def _retrieve_carbon_source_properties(node: dict) -> tuple[float, float, float, float]:
1487
1498
  """
1488
1499
  Extract the carbon source properties from an input or product node or, if required, retrieve them from default
1489
1500
  properties.
@@ -1497,12 +1508,11 @@ def _retrieve_carbon_source_properties(node: dict) -> tuple[float, float, float]
1497
1508
  Returns
1498
1509
  -------
1499
1510
  tuple[float, float, float]
1500
- `(carbon_content, nitrogen_content, lignin_content)`
1511
+ `(carbon_content, nitrogen_content, lignin_content, dry_matter)`
1501
1512
  """
1502
- carbon_content, nitrogen_content, lignin_content = (
1513
+ return (
1503
1514
  get_node_property(node, term_id).get("value", 0)/100 for term_id in _CARBON_INPUT_PROPERTY_TERM_IDS
1504
1515
  )
1505
- return carbon_content, nitrogen_content, lignin_content
1506
1516
 
1507
1517
 
1508
1518
  def _validate_carbon_source(carbon_source: CarbonSource) -> bool:
@@ -212,7 +212,7 @@ def _run_practice(cycle: dict, meanDE: float, meanECHHV: float, REM: float, REG:
212
212
  'weightAtOneYear',
213
213
  'weightAtSlaughter'
214
214
  ])
215
- has_positive_feed_values = all([NEm_feed > 0, NEg_feed > 0])
215
+ has_positive_feed_values = all([NEm_feed >= 0, NEg_feed >= 0])
216
216
 
217
217
  logRequirements(cycle, model=MODEL, term=input_term_id, model_key=MODEL_KEY,
218
218
  feed_logs=log_as_table(log_feed),
@@ -323,7 +323,14 @@ def calculate_GE(values: list, REM: float, REG: float, NEwool: float, NEm_feed:
323
323
  NEp = _sum_values(values, 'NEp')
324
324
  NEg = _sum_values(values, 'NEg')
325
325
 
326
- return ((NEm + NEa + NEl + NEwork + NEp - NEm_feed)/REM + (NEg + NEwool - NEg_feed)/REG) if all([REM, REG]) else 0
326
+ REM_factor = NEm + NEa + NEl + NEwork + NEp
327
+ REG_factor = NEg + NEwool
328
+
329
+ correction_factor = REM_factor + REG_factor
330
+ NEm_feed_corrected = NEm_feed * REM_factor/correction_factor if correction_factor != 0 else NEm_feed
331
+ NEg_feed_corrected = NEg_feed * REG_factor/correction_factor if correction_factor != 0 else NEg_feed
332
+
333
+ return ((REM_factor - NEm_feed_corrected)/REM + (REG_factor - NEg_feed_corrected)/REG) if all([REM, REG]) else 0
327
334
 
328
335
 
329
336
  def calculate_meanECHHV(practices: list, **log_args) -> float:
@@ -65,7 +65,7 @@ def logShouldRun(log_node: dict, model: str, term: Union[str, None], should_run:
65
65
  def debugMissingLookup(lookup_name: str, row: str, row_value: str, col: str, value, **kwargs):
66
66
  if value is None or value == '':
67
67
  extra = (', ' + _join_args(**kwargs)) if len(kwargs.keys()) > 0 else ''
68
- logger.warn('Missing lookup=%s, %s=%s, column=%s' + extra, lookup_name, row, row_value, col)
68
+ logger.warning('Missing lookup=%s, %s=%s, column=%s' + extra, lookup_name, row, row_value, col)
69
69
 
70
70
 
71
71
  def logErrorRun(model: str, term: str, error: str):