hestia-earth-models 0.74.13__py3-none-any.whl → 0.74.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -339,7 +339,8 @@
   "runStrategy": "always",
   "mergeStrategy": "list",
   "mergeArgs": {
-    "replaceThreshold": ["value", 0.01]
+    "replaceThreshold": ["value", 0.01],
+    "sameMethodModel": true
   },
   "stage": 1
 },
@@ -962,7 +963,8 @@
   "runStrategy": "add_blank_node_if_missing",
   "mergeStrategy": "list",
   "mergeArgs": {
-    "replaceThreshold": ["value", 0.01]
+    "replaceThreshold": ["value", 0.01],
+    "sameMethodModel": true
   },
   "stage": 1
 },
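
Both configuration hunks extend the list-merge arguments: alongside the existing `replaceThreshold` guard (only overwrite an existing `value` when it changes by more than 1%), `"sameMethodModel": true` restricts replacement to nodes produced by the same methodModel. A minimal sketch of the intended guard, under stated assumptions — the real merge logic lives in the hestia-earth orchestrator, and `should_replace` and its arguments are hypothetical names used only to illustrate the two mergeArgs:

    def should_replace(existing: dict, new: dict, threshold: float = 0.01, same_method_model: bool = True) -> bool:
        # "sameMethodModel": true -> never overwrite a node from a different methodModel
        if same_method_model and existing.get('methodModel', {}).get('@id') != new.get('methodModel', {}).get('@id'):
            return False
        # "replaceThreshold": ["value", 0.01] -> replace only on a >1% relative change
        old, updated = (existing.get('value') or [0])[0], (new.get('value') or [0])[0]
        return old == 0 or abs(updated - old) / abs(old) > threshold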
@@ -12,7 +12,10 @@ from hestia_earth.models.utils.background_emissions import (
     parse_term_id,
     process_input_mappings
 )
-from .utils import get_input_mappings, extract_input_mapping
+from .utils import (
+    get_input_mappings, extract_input_mapping,
+    CUTOFF_KEY, get_cutoff_lookup, cutoff_value, filter_blank_nodes_cutoff
+)
 from . import MODEL

 REQUIREMENTS = {
@@ -63,7 +66,14 @@ MODEL_KEY = 'cycle'
 TIER = EmissionMethodTier.BACKGROUND.value


-def _emission(term_id: str, value: float, input: dict, country_id: str = None, key_id: str = None):
+def _emission(
+    term_id: str,
+    value: float,
+    input: dict,
+    country_id: str = None,
+    key_id: str = None,
+    cutoff_lookup=None
+):
     emission = _new_emission(term=term_id, model=MODEL, value=value, country_id=country_id, key_id=key_id)
     emission['methodTier'] = TIER
     emission['inputs'] = [input.get('term')]
@@ -71,10 +81,11 @@ def _emission(term_id: str, value: float, input: dict, country_id: str = None, k
         emission['operation'] = input.get('operation')
     if input.get('animal'):
         emission['animals'] = [input.get('animal')]
-    return emission
+    cutoff = cutoff_value(cutoff_lookup, term_id=term_id, country_id=country_id, key_id=key_id)
+    return emission | ({CUTOFF_KEY: value * cutoff} if cutoff is not None else {})


-def _run_input(cycle: dict):
+def _run_input(cycle: dict, cutoff_lookup=None):
     no_gap_filled_background_emissions_func = no_gap_filled_background_emissions(cycle)
     log_missing_emissions_func = log_missing_emissions(cycle, model=MODEL, methodTier=TIER)

@@ -122,6 +133,7 @@ def _run_input(cycle: dict):
             input=input,
             country_id=values[0].get('country'),
             key_id=values[0].get('key'),
+            cutoff_lookup=cutoff_lookup
         )
         for term_id, values in results.items()
     ]
@@ -131,4 +143,7 @@ def _run_input(cycle: dict):
 def run(cycle: dict):
     inputs = get_background_inputs(cycle)
     grouped_inputs = group_by_keys(inputs, ['term', 'operation', 'animal'])
-    return flatten(map(_run_input(cycle), grouped_inputs.values()))
+    # used to calculate the global coefficient
+    cutoff_lookup = get_cutoff_lookup(term_type=TermTermType.EMISSION)
+    emissions = flatten(map(_run_input(cycle, cutoff_lookup), grouped_inputs.values()))
+    return emissions if cutoff_lookup is None else filter_blank_nodes_cutoff(emissions)
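
The cutoff lookup is threaded from `run` through `_run_input` into `_emission`, which tags each emission with a temporary `cutoff_coeff` entry (its value scaled by the looked-up coefficient) via a dict union; `filter_blank_nodes_cutoff` later consumes and strips that key. A toy run of the tagging expression, with made-up numbers:

    CUTOFF_KEY = 'cutoff_coeff'
    emission = {'term': {'@id': 'co2ToAirFuelCombustion'}, 'value': [10.0]}
    cutoff = 0.2  # assumed coefficient returned by cutoff_value(); None disables tagging
    tagged = emission | ({CUTOFF_KEY: 10.0 * cutoff} if cutoff is not None else {})
    assert tagged[CUTOFF_KEY] == 2.0  # stripped again before the emissions are returned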
@@ -1,7 +1,8 @@
+import os
 from functools import lru_cache
 from hestia_earth.schema import TermTermType
-from hestia_earth.utils.lookup import download_lookup, column_name
-from hestia_earth.utils.tools import non_empty_list
+from hestia_earth.utils.lookup import download_lookup, get_table_value, column_name
+from hestia_earth.utils.tools import non_empty_list, safe_parse_float, omit

 from hestia_earth.models.utils.term import get_lookup_value
 from hestia_earth.models.utils.background_emissions import convert_background_lookup
@@ -35,3 +36,52 @@ def _build_lookup(term_type: str):
 def ecoalim_values(mapping: str, term_type: TermTermType):
     data = _build_lookup(term_type.value)
     return list(data[mapping].items())
+
+
+CUTOFF_KEY = 'cutoff_coeff'
+_CUTOFF_MAX_PERCENTAGE = int(os.getenv('ECOALIM_CUTOFF_MAX_PERCENT', '99'))
+
+
+def get_cutoff_lookup(term_type: TermTermType):
+    filename = f"ecoalim-{term_type.value}-cutoff.csv"
+    return download_lookup(filename, keep_in_memory=False) if _CUTOFF_MAX_PERCENTAGE else None
+
+
+def _cutoff_id(term_id: str, country_id: str = None, key_id: str = None):
+    return term_id + (
+        f"+inputs[{key_id}]" if key_id else ''
+    ) + (
+        f"+country[{country_id}]" if country_id else ''
+    )
+
+
+def cutoff_value(cutoff_lookup, term_id: str, country_id: str = None, key_id: str = None):
+    cutoff_id = _cutoff_id(term_id=term_id, country_id=country_id, key_id=key_id)
+    return None if cutoff_lookup is None else safe_parse_float(
+        get_table_value(cutoff_lookup, 'termid', cutoff_id, 'percentage'),
+        default=None
+    )
+
+
+def filter_blank_nodes_cutoff(blank_nodes: list):
+    # use the generic contribution of the blank node towards EF Score to remove the lowest percentage
+    total_contributions = sum([
+        v.get(CUTOFF_KEY, 0) for v in blank_nodes
+    ])
+    blank_nodes_with_contributions = sorted([
+        (
+            v,
+            v.get(CUTOFF_KEY, 0) * 100 / total_contributions
+        )
+        for v in blank_nodes
+    ], key=lambda v: v[1], reverse=True)
+
+    sum_contributions = 0
+    filtered_blank_nodes = []
+    for blank_node, contribution in blank_nodes_with_contributions:
+        sum_contributions = sum_contributions + contribution
+        if sum_contributions > _CUTOFF_MAX_PERCENTAGE:
+            break
+        filtered_blank_nodes.append(omit(blank_node, [CUTOFF_KEY]))
+
+    return filtered_blank_nodes
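
A self-contained walk-through of the filter above, with `omit` re-implemented inline (a stand-in for hestia_earth.utils.tools.omit) and `ECOALIM_CUTOFF_MAX_PERCENT` at its default of 99. Note the boundary behaviour: the node that first pushes the cumulative share past the cap is itself dropped, because the loop breaks before appending:

    CUTOFF_KEY = 'cutoff_coeff'
    _CUTOFF_MAX_PERCENTAGE = 99

    def omit(d: dict, keys: list) -> dict:
        return {k: v for k, v in d.items() if k not in keys}

    nodes = [
        {'term': 'a', CUTOFF_KEY: 90.0},  # 90% of the total contribution
        {'term': 'b', CUTOFF_KEY: 9.5},   # cumulative 99.5% -> crosses the 99% cap
        {'term': 'c', CUTOFF_KEY: 0.5},   # never reached
    ]
    total = sum(n[CUTOFF_KEY] for n in nodes)  # 100.0
    kept, cumulative = [], 0
    for node, share in sorted(((n, n[CUTOFF_KEY] * 100 / total) for n in nodes), key=lambda t: t[1], reverse=True):
        cumulative += share
        if cumulative > _CUTOFF_MAX_PERCENTAGE:
            break
        kept.append(omit(node, [CUTOFF_KEY]))

    assert kept == [{'term': 'a'}]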
@@ -58,8 +58,7 @@ def _get_factor(term: dict, column: str) -> Optional[float]:


 def _get_pef_method_model(term: dict) -> List[str]:
-    entries = get_lookup_value(term, method_model_colum, model=MODEL, term=TERM_ID) or ''
-    return entries.split(";")
+    return (get_lookup_value(term, method_model_colum, skip_debug=True) or '').split(";")


 def _indicator_factors(impact_assessment: dict, indicator: dict):
@@ -110,9 +109,11 @@ def _run(indicators: List[dict]):
     return _indicator(value=list_sum([indicator["weighted-value"] for indicator in indicators]))


-def _valid_indicator(indicator: Optional[dict]) -> bool:
-    value = None if indicator is None else _node_value(indicator)
-    return isinstance(value, (int, float)) and _is_a_PEF_indicator(indicator)
+def _valid_indicator(indicator: Optional[dict]):
+    return isinstance(_node_value(indicator), (int, float))
+
+
+def _log_indicators(indicators: list): return ';'.join([v['indicator'] for v in indicators])


 def _should_run(impact_assessment: dict) -> Tuple[bool, list[dict]]:
@@ -123,25 +124,33 @@ def _should_run(impact_assessment: dict) -> Tuple[bool, list[dict]]:
     ]
     has_pef_indicators = bool(indicators)

-    processed_indicators = [{
-        "indicator": indicator['term']['@id'],
-        "valid-indicator": _valid_indicator(indicator),
-        "one-indicator-for-category": _count_duplicate_indicators(indicator, indicators) == 1,
-        "indicator-pef-category": indicator.get('term', {}).get('@id'),
-    } | _indicator_factors(impact_assessment, indicator) for indicator in indicators]
+    processed_indicators = [
+        {
+            "indicator": indicator['term']['@id'],
+            "methodModel": indicator.get('methodModel', {}).get("@id"),
+            "valid-value": _valid_indicator(indicator),
+            "count-indicators": _count_duplicate_indicators(indicator, indicators),
+            "PEF-category": indicator.get('term', {}).get('@id')
+        } | _indicator_factors(impact_assessment, indicator)
+        for indicator in indicators
+    ]
+
+    duplicate_indicators = [v for v in processed_indicators if v['count-indicators'] > 1]
+
+    invalid_indicators = [v for v in processed_indicators if not v['valid-value']]
+    valid_indicators = [v for v in processed_indicators if v['valid-value']]

-    no_duplicate_indicators = all([indicator['one-indicator-for-category'] for indicator in processed_indicators])
-    valid_indicators = [indicator for indicator in processed_indicators if indicator['valid-indicator']]
-    all_indicators_valid = all([indicator['valid-indicator'] for indicator in processed_indicators])
+    all_indicators_valid = len(valid_indicators) == len(processed_indicators)

     logRequirements(impact_assessment, model=MODEL, term=TERM_ID,
                     has_pef_indicators=has_pef_indicators,
-                    no_duplicate_indicators=no_duplicate_indicators,
+                    all_indicators=log_as_table(processed_indicators),
                     all_indicators_valid=all_indicators_valid,
-                    processed_indicators=log_as_table(processed_indicators),
-                    )
+                    duplicate_indicators=_log_indicators(duplicate_indicators),
+                    valid_indicators=_log_indicators(valid_indicators),
+                    invalid_indicators=_log_indicators(invalid_indicators))

-    should_run = all([has_pef_indicators, all_indicators_valid, no_duplicate_indicators])
+    should_run = all([has_pef_indicators, all_indicators_valid, not duplicate_indicators])
     logShouldRun(impact_assessment, MODEL, TERM_ID, should_run)
     return should_run, valid_indicators

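`_should_run` now keeps the per-indicator bookkeeping instead of collapsing it straight into booleans, so the requirement log can name the offending indicators. A toy partition mirroring the new logic (term ids are illustrative; `count-indicators` is the per-category duplicate count computed above):

    processed_indicators = [
        {'indicator': 'gwp100', 'valid-value': True, 'count-indicators': 2},
        {'indicator': 'gwp100', 'valid-value': True, 'count-indicators': 2},
        {'indicator': 'ozoneDepletionPotential', 'valid-value': False, 'count-indicators': 1},
    ]
    duplicate_indicators = [v for v in processed_indicators if v['count-indicators'] > 1]
    valid_indicators = [v for v in processed_indicators if v['valid-value']]
    should_run = all([
        bool(processed_indicators),
        len(valid_indicators) == len(processed_indicators),
        not duplicate_indicators
    ])
    assert not should_run  # blocked by both the duplicate pair and the invalid value
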
@@ -1,13 +1,12 @@
-from datetime import datetime, timedelta
+import os
 from hestia_earth.schema import SiteSiteType, TermTermType
 from hestia_earth.utils.model import filter_list_term_type
-from hestia_earth.utils.tools import to_precision, omit, pick, non_empty_list
+from hestia_earth.utils.tools import to_precision, omit, pick, non_empty_list, flatten
 from hestia_earth.utils.blank_node import group_by_keys

 from hestia_earth.models.log import logRequirements, log_as_table, logShouldRun
-from hestia_earth.models.utils.constant import DAYS_IN_YEAR
 from hestia_earth.models.utils.management import _new_management
-from hestia_earth.models.utils.blank_node import DatestrFormat, _gapfill_datestr, DatestrGapfillMode
+from hestia_earth.models.utils.blank_node import _gapfill_datestr, DatestrGapfillMode, condense_nodes
 from .utils import (
     LAND_USE_TERMS_FOR_TRANSFORMATION,
     IPCC_LAND_USE_CATEGORY_PERENNIAL,
@@ -79,6 +78,7 @@ LOOKUPS = {
 }
 MODEL_KEY = 'landCover'

+_FALLBACK_COMPUTE_DATA = os.getenv('MODEL_LAND_COVER_FALLBACK_COMPUTE_DATA', 'true') == 'true'
 _BUILDING_SITE_TYPES = [
     SiteSiteType.AGRI_FOOD_PROCESSOR.value,
     SiteSiteType.ANIMAL_HOUSING.value,
@@ -91,11 +91,23 @@ _ALLOWED_SITE_TYPES = [
     SiteSiteType.PERMANENT_PASTURE.value
 ]
 _DEFAULT_WINDOW_IN_YEARS = 20
-_DATE_TOLERANCE_IN_YEARS = 1
 _ALLOWED_LAND_USE_TYPES = [ANNUAL_CROPLAND, PERMANENT_CROPLAND, PERMANENT_PASTURE, TOTAL_CROPLAND]
 _COMPLETE_CHANGES_OTHER_LAND = {k: 0 for k in LAND_USE_TERMS_FOR_TRANSFORMATION.keys()} | {OTHER_LAND: 1}


+def _run(values: list):
+    blank_nodes = [
+        _new_management(
+            term=value.get("term-id"),
+            value=value.get("percentage"),
+            model=MODEL,
+            start_date=f"{value['year'] - _DEFAULT_WINDOW_IN_YEARS}-01-01",
+            end_date=f"{value['year'] - _DEFAULT_WINDOW_IN_YEARS}-12-31",
+        ) for value in values
+    ]
+    return condense_nodes(blank_nodes)
+
+
 def _should_group_landCover(management_node: dict):
     return any(
         bool(_get_lookup_with_cache(
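
The new module-level `_run` dates each management node to the single calendar year `_DEFAULT_WINDOW_IN_YEARS` before the value's reference year, then hands the list to `condense_nodes` (imported above from hestia_earth.models.utils.blank_node) to merge equivalent nodes. The date arithmetic for one toy value (term id illustrative):

    _DEFAULT_WINDOW_IN_YEARS = 20
    value = {'term-id': 'annualCropland', 'percentage': 42.5, 'year': 2020}
    start_date = f"{value['year'] - _DEFAULT_WINDOW_IN_YEARS}-01-01"
    end_date = f"{value['year'] - _DEFAULT_WINDOW_IN_YEARS}-12-31"
    assert (start_date, end_date) == ('2000-01-01', '2000-12-31')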
@@ -107,7 +119,7 @@ def _should_group_landCover(management_node: dict):


 def _get_land_use_term_from_node(node: dict):
-    return _get_lookup_with_cache(lookup_term=node.get("term", {}), column=LOOKUPS.get("landCover")[1])
+    return _get_lookup_with_cache(lookup_term=node.get("term", {}), column='FAOSTAT_LAND_AREA_CATEGORY')


 def _date_strip(date: str): return date[:10] if date else None
@@ -116,25 +128,6 @@ def _date_strip(date: str): return date[:10] if date else None
 def _date_year(date: str): return int(date[:4]) if date else None


-def _no_prior_land_cover_data(nodes: list, reference_date: str) -> bool:
-    """
-    Returns true if there are no nodes whose start & end dates the target_node falls within,
-    including a tolerance.
-    """
-    target_date = (
-        datetime.strptime(reference_date, DatestrFormat.YEAR_MONTH_DAY.value)
-        - timedelta(days=_DEFAULT_WINDOW_IN_YEARS * DAYS_IN_YEAR)
-    )
-    tolerance = timedelta(days=_DATE_TOLERANCE_IN_YEARS * DAYS_IN_YEAR)
-    previous_nodes = [
-        node for node in nodes
-        if datetime.strptime(node.get("startDate"), DatestrFormat.YEAR_MONTH_DAY.value) - tolerance
-        < target_date <
-        datetime.strptime(node.get("endDate"), DatestrFormat.YEAR_MONTH_DAY.value) + tolerance
-    ]
-    return len(previous_nodes) == 0
-
-
 def _collect_land_use_types(nodes: list) -> list:
     """Look up the land use type from management nodes."""
     return [
@@ -151,13 +144,11 @@ def _collect_land_use_types(nodes: list) -> list:
 def _site_area_valid(site_area: dict): return site_area and all([v is not None for v in site_area.values()])


-def _extend_site_area(site: dict, management_nodes: list, land_use_node: dict) -> list:
+def _extend_site_area(site: dict, existing_years: set, land_use_node: dict) -> list:
     reference_year = land_use_node['year']
+    target_year = land_use_node['year'] - _DEFAULT_WINDOW_IN_YEARS

-    has_no_prior_land_cover_data = _no_prior_land_cover_data(
-        nodes=management_nodes,
-        reference_date=f"{land_use_node['year']}-06-01"
-    )
+    has_no_prior_land_cover_data = target_year not in existing_years

     site_area_from_lookups = get_site_area_from_lookups(
         country_id=site.get("country", {}).get("@id"),
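
The datetime parsing with a one-year tolerance is gone: the prior-data check reduces to set membership on years, with `existing_years` built later in `_should_run` from the site's management nodes. In sketch form:

    _DEFAULT_WINDOW_IN_YEARS = 20
    existing_years = {1998, 2000, 2003}  # illustrative years already covered by management nodes
    land_use_node = {'year': 2020}
    target_year = land_use_node['year'] - _DEFAULT_WINDOW_IN_YEARS  # 2000
    has_no_prior_land_cover_data = target_year not in existing_years
    assert has_no_prior_land_cover_data is False  # 2000 is already covered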
@@ -171,7 +162,7 @@ def _extend_site_area(site: dict, management_nodes: list, land_use_node: dict) -
         term=land_use_node['term'],
         land_use_type=land_use_node['landUseType'],
         reference_year=reference_year
-    ) if not has_siteArea_from_lookups else ({}, False, {})
+    ) if not has_siteArea_from_lookups and _FALLBACK_COMPUTE_DATA else ({}, False, {})

     is_perenial = crop_ipcc_land_use_category(land_use_node['landCover-id']) == IPCC_LAND_USE_CATEGORY_PERENNIAL

@@ -225,6 +216,9 @@ def _should_run(site: dict) -> tuple[bool, list, dict]:
         if not _should_group_landCover(node)
     ])

+    # get all existing dates that we should not add again
+    existing_years = set(non_empty_list(flatten(map(_years_from_node, management_nodes))))
+
     # get the Management `landCover` nodes that are "landUse" nodes
     land_use_nodes = [
         node for node in management_nodes
@@ -240,7 +234,7 @@

     # add metadata
     land_use_nodes = sorted([
-        node if is_site_building else _extend_site_area(site, management_nodes, node)
+        node if is_site_building else _extend_site_area(site, existing_years, node)
         for node in land_use_nodes
     ], key=lambda n: n['year'])

@@ -314,12 +308,4 @@ def _should_run(site: dict) -> tuple[bool, list, dict]:

 def run(site: dict) -> list:
     should_run, values = _should_run(site=site)
-    return [
-        _new_management(
-            term=value.get("term-id"),
-            value=value.get("percentage"),
-            model=MODEL,
-            start_date=f"{value['year'] - _DEFAULT_WINDOW_IN_YEARS}-01-01",
-            end_date=f"{value['year'] - _DEFAULT_WINDOW_IN_YEARS}-12-31",
-        ) for value in values
-    ] if should_run else []
+    return _run(values) if should_run else []
@@ -1,4 +1,3 @@
-import functools
 import math
 from functools import lru_cache
 from collections import defaultdict
@@ -39,6 +38,25 @@ def get_land_use_terms():
     return [v[0] for v in LAND_USE_TERMS_FOR_TRANSFORMATION.values()]


+@lru_cache()
+def _get_immutable_lookup(term_id: str, term_type: str, col: str):
+    new_term = {"@id": term_id, "termType": term_type} if term_type and term_id else {}
+    return get_lookup_value(
+        lookup_term=new_term,
+        column=col,
+        skip_debug=False,
+        model=MODEL,
+        term=term_id
+    )
+
+
+def _get_lookup_with_cache(lookup_term: dict, column: str):
+    return _get_immutable_lookup(term_id=lookup_term.get("@id"), term_type=lookup_term.get("termType"), col=column)
+
+
+def _get_faostat_name(term: dict) -> str: return _get_lookup_with_cache(term, "cropGroupingFaostatArea")
+
+
 def _is_missing_or_none(value) -> bool:
     return value is None or _is_missing_value(value)

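Hoisting the cache to module level fixes a real miss: the previous version (deleted in the next hunk) built a fresh `functools.cache` inside every call to `_get_lookup_with_cache`, so no lookup result was ever reused across calls. A minimal demonstration of the difference:

    from functools import lru_cache

    calls = {'n': 0}

    def closure_cached(x):
        @lru_cache()          # a brand-new cache on every invocation -> always a miss
        def inner(x):
            calls['n'] += 1
            return x * 2
        return inner(x)

    closure_cached(3); closure_cached(3)
    assert calls['n'] == 2    # no reuse

    @lru_cache()              # one module-level cache, as in the new code
    def module_cached(x):
        calls['n'] += 1
        return x * 2

    module_cached(3); module_cached(3)
    assert calls['n'] == 3    # the second call is a cache hit
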
@@ -68,26 +86,6 @@ def _cap_values(dictionary: dict, lower_limit: float = 0, upper_limit: float = 1
     return {key: min([upper_limit, max([lower_limit, value])]) for key, value in dictionary.items()}


-def _get_lookup_with_cache(lookup_term, column):
-    """Wrapper to get_lookup_value which pulls out the immutable parts of the term to allow caching."""
-    @functools.cache
-    def _get_immutable_lookup(term_id: str, term_type: str, col: str):
-        new_term = {"@id": term_id, "termType": term_type} if term_type and term_id else {}
-        return get_lookup_value(
-            lookup_term=new_term,
-            column=col,
-            skip_debug=False,
-            model=MODEL,
-            term=term_id
-        )
-
-    return _get_immutable_lookup(
-        term_id=lookup_term.get("@id"),
-        term_type=lookup_term.get("termType"),
-        col=column
-    )
-
-
 def _get_changes(country_id: str, reference_year: int) -> tuple[dict, list]:
     """
     For each entry in ALL_LAND_USE_TERMS, creates a key: value in output dictionary, also TOTAL
@@ -349,11 +347,6 @@ def _get_shares_of_expansion(
     })


-def _get_faostat_name(term: dict) -> str:
-    """For landCover terms, find the cropGroupingFaostatArea name for the landCover id."""
-    return _get_lookup_with_cache(term, "cropGroupingFaostatArea")
-
-
 def _get_most_common_or_alphabetically_first(crop_terms: list) -> str:
     histogram = {term: crop_terms.count(term) for term in crop_terms}
     max_freq = max(histogram.values())
@@ -363,7 +356,8 @@ def _get_most_common_or_alphabetically_first(crop_terms: list) -> str:

 def _get_complete_faostat_to_crop_mapping() -> dict:
     """Returns mapping in the format: {faostat_name: IPCC_LAND_USE_CATEGORY, ...}"""
-    lookup = download_lookup("crop.csv")
+    term_type = TermTermType.CROP.value
+    lookup = download_lookup(f"{term_type}.csv")
     mappings = defaultdict(list)
     for crop_term_id in [row[0] for row in lookup]:
         key = column_name(
@@ -746,24 +740,25 @@ def get_site_area_from_lookups(country_id: str, reference_year: int, term: dict)
     """
     lookup_prefix = 'region-crop-cropGroupingFAOSTAT-landCover'
     lookup_column = _get_faostat_name(term)
+    raw_region_data = {
+        land_type: get_region_lookup_value(
+            lookup_name=f"{lookup_prefix}-{_get_land_cover_lookup_suffix(land_type)}.csv",
+            term_id=country_id,
+            column=lookup_column,
+            model=MODEL,
+            model_key=MODEL_KEY
+        ) if lookup_column else None
+        for land_type in LAND_USE_TERMS_FOR_TRANSFORMATION.keys()
+    }
+    parsed_region_data = {
+        land_type: safe_parse_float(
+            value=extract_grouped_data(data=value, key=str(reference_year)),
+            default=None
+        )
+        for land_type, value in raw_region_data.items()
+    }
+    # Divide by 100 to match site_area ratios
     return {
-        # Divide by 100 to match site_area ratios
         land_type: value / 100 if value is not None else value
-        for land_type, value in
-        {
-            land_type: safe_parse_float(
-                value=extract_grouped_data(
-                    data=get_region_lookup_value(
-                        lookup_name=f"{lookup_prefix}-{_get_land_cover_lookup_suffix(land_type)}.csv",
-                        term_id=country_id,
-                        column=lookup_column,
-                        model=MODEL,
-                        key=MODEL_KEY
-                    ),
-                    key=str(reference_year)
-                ),
-                default=None
-            )
-            for land_type in LAND_USE_TERMS_FOR_TRANSFORMATION.keys()
-        }.items()
+        for land_type, value in parsed_region_data.items()
     }
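
The rewrite unrolls a four-level nested comprehension into two named stages (raw lookup, then parse) and changes behaviour in two small ways: `get_region_lookup_value` is now skipped entirely when no FAOSTAT column was found, and the `key=MODEL_KEY` argument becomes `model_key=MODEL_KEY`. The two-stage shape, with toy stand-ins for the lookup helpers (the real ones come from hestia_earth.utils; the land-type keys and cell format here are illustrative):

    def safe_parse_float(value, default=None):
        try:
            return float(value)
        except (TypeError, ValueError):
            return default

    def extract_grouped_data(data, key):
        # mimics "1999:30;2000:35"-style grouped lookup cells
        pairs = dict(item.split(':') for item in data.split(';')) if data else {}
        return pairs.get(key)

    raw_region_data = {'forest': '1999:30;2000:35', 'cropland': None}
    parsed_region_data = {k: safe_parse_float(extract_grouped_data(v, '2000')) for k, v in raw_region_data.items()}
    site_area = {k: v / 100 if v is not None else v for k, v in parsed_region_data.items()}
    assert site_area == {'forest': 0.35, 'cropland': None}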
@@ -74,7 +74,8 @@ LOOKUPS = {
     ],
     "crop-property": ["neutralDetergentFibreContent", "energyContentHigherHeatingValue"],
     "forage-property": ["neutralDetergentFibreContent", "energyContentHigherHeatingValue"],
-    "processedFood-property": ["neutralDetergentFibreContent", "energyContentHigherHeatingValue"]
+    "processedFood-property": ["neutralDetergentFibreContent", "energyContentHigherHeatingValue"],
+    "feedFoodAdditive": ["hasEnergyContent"]
 }
 RETURNS = {
     "Emission": [{