hestia-earth-models 0.73.8__py3-none-any.whl → 0.74.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (58)
  1. hestia_earth/models/aware/scarcityWeightedWaterUse.py +7 -6
  2. hestia_earth/models/aware2_0/__init__.py +14 -0
  3. hestia_earth/models/aware2_0/scarcityWeightedWaterUse.py +115 -0
  4. hestia_earth/models/config/Cycle.json +5 -3
  5. hestia_earth/models/config/ImpactAssessment.json +1 -1
  6. hestia_earth/models/config/__init__.py +26 -2
  7. hestia_earth/models/cycle/animal/input/hestiaAggregatedData.py +2 -2
  8. hestia_earth/models/cycle/animal/input/properties.py +6 -5
  9. hestia_earth/models/cycle/animal/milkYield.py +8 -3
  10. hestia_earth/models/cycle/utils.py +6 -6
  11. hestia_earth/models/data/ecoinventV3/__init__.py +8 -26
  12. hestia_earth/models/ecoalimV9/cycle.py +26 -10
  13. hestia_earth/models/ecoalimV9/impact_assessment.py +30 -10
  14. hestia_earth/models/ecoalimV9/utils.py +12 -72
  15. hestia_earth/models/ecoinventV3/__init__.py +8 -140
  16. hestia_earth/models/ecoinventV3/cycle.py +140 -0
  17. hestia_earth/models/ecoinventV3/utils.py +28 -1
  18. hestia_earth/models/ecoinventV3AndEmberClimate/__init__.py +8 -137
  19. hestia_earth/models/ecoinventV3AndEmberClimate/cycle.py +144 -0
  20. hestia_earth/models/emepEea2019/utils.py +2 -3
  21. hestia_earth/models/environmentalFootprintV3_1/environmentalFootprintSingleOverallScore.py +5 -7
  22. hestia_earth/models/frischknechtEtAl2000/ionisingRadiationKbqU235Eq.py +41 -43
  23. hestia_earth/models/geospatialDatabase/awareWaterBasinId.py +2 -2
  24. hestia_earth/models/geospatialDatabase/awareWaterBasinId_v1.py +45 -0
  25. hestia_earth/models/hestia/default_emissions.py +5 -1
  26. hestia_earth/models/hestia/default_resourceUse.py +5 -1
  27. hestia_earth/models/hestia/landCover.py +110 -12
  28. hestia_earth/models/hestia/utils.py +1 -0
  29. hestia_earth/models/hestia/waterSalinity.py +2 -3
  30. hestia_earth/models/impact_assessment/emissions.py +1 -1
  31. hestia_earth/models/linkedImpactAssessment/emissions.py +2 -2
  32. hestia_earth/models/log.py +8 -3
  33. hestia_earth/models/mocking/search-results.json +1549 -1545
  34. hestia_earth/models/utils/__init__.py +3 -0
  35. hestia_earth/models/utils/background_emissions.py +109 -9
  36. hestia_earth/models/utils/blank_node.py +1 -11
  37. hestia_earth/models/utils/feedipedia.py +2 -2
  38. hestia_earth/models/utils/impact_assessment.py +1 -3
  39. hestia_earth/models/utils/lookup.py +1 -1
  40. hestia_earth/models/version.py +1 -1
  41. hestia_earth/orchestrator/log.py +8 -3
  42. {hestia_earth_models-0.73.8.dist-info → hestia_earth_models-0.74.0.dist-info}/METADATA +2 -2
  43. {hestia_earth_models-0.73.8.dist-info → hestia_earth_models-0.74.0.dist-info}/RECORD +58 -49
  44. tests/models/aware2_0/__init__.py +0 -0
  45. tests/models/aware2_0/test_scarcityWeightedWaterUse.py +58 -0
  46. tests/models/ecoinventV3/__init__.py +0 -0
  47. tests/models/{test_ecoinventV3.py → ecoinventV3/test_cycle.py} +5 -5
  48. tests/models/ecoinventV3AndEmberClimate/__init__.py +0 -0
  49. tests/models/{test_ecoinventV3AndEmberClimate.py → ecoinventV3AndEmberClimate/test_cycle.py} +6 -4
  50. tests/models/environmentalFootprintV3_1/test_environmentalFootprintSingleOverallScore.py +2 -2
  51. tests/models/frischknechtEtAl2000/test_ionisingRadiationKbqU235Eq.py +18 -27
  52. tests/models/hestia/test_landCover.py +16 -6
  53. tests/models/site/pre_checks/test_cache_geospatialDatabase.py +4 -4
  54. tests/models/test_config.py +53 -7
  55. tests/models/{ecoalimV9/test_utils.py → utils/test_background_emissions.py} +2 -2
  56. {hestia_earth_models-0.73.8.dist-info → hestia_earth_models-0.74.0.dist-info}/LICENSE +0 -0
  57. {hestia_earth_models-0.73.8.dist-info → hestia_earth_models-0.74.0.dist-info}/WHEEL +0 -0
  58. {hestia_earth_models-0.73.8.dist-info → hestia_earth_models-0.74.0.dist-info}/top_level.txt +0 -0
hestia_earth/models/aware/scarcityWeightedWaterUse.py

@@ -38,8 +38,8 @@ RETURNS = {
 }
 LOOKUPS = {
     "@doc": "Different lookup files are used depending on the situation",
-    "awareWaterBasinId": "",
-    "region-aware-factors": ""
+    "awareWaterBasinId": ["YR_IRRI", "YR_NONIRRI", "YR_TOT"],
+    "region-aware-factors": ["Agg_CF_irri", "Agg_CF_non_irri", "Agg_CF_unspecified"]
 }
 TERM_ID = 'scarcityWeightedWaterUse'
 AWARE_KEY = 'awareWaterBasinId'
@@ -48,6 +48,8 @@ IRRIGATED_SITE_TYPES = [
     SiteSiteType.GLASS_OR_HIGH_ACCESSIBLE_COVER.value,
     SiteSiteType.PERMANENT_PASTURE.value
 ]
+_REGION_LOOKUP = 'region-aware-factors.csv'
+_AWARE_LOOKUP = 'awareWaterBasinId.csv'
 
 
 def _indicator(value: float):
@@ -59,19 +61,18 @@ def _indicator(value: float):
 def _get_factor_from_basinId(site: dict, aware_id: str):
     lookup_col = 'YR_IRRI' if site.get('siteType') in IRRIGATED_SITE_TYPES else 'YR_NONIRRI'
     value = _get_single_table_value(
-        download_lookup(f"{AWARE_KEY}.csv"), column_name(AWARE_KEY), int(aware_id), column_name(lookup_col)
+        download_lookup(_AWARE_LOOKUP), column_name(AWARE_KEY), int(aware_id), column_name(lookup_col)
     )
-    debugMissingLookup(f"{AWARE_KEY}.csv", AWARE_KEY, aware_id, lookup_col, value, model=MODEL, term=TERM_ID)
+    debugMissingLookup(_AWARE_LOOKUP, AWARE_KEY, aware_id, lookup_col, value, model=MODEL, term=TERM_ID)
     return safe_parse_float(value, default=None)
 
 
 def _get_factor_from_region(impact_assessment: dict, fresh_water: dict, site: dict):
     region_id = get_region_id(impact_assessment, fresh_water)
     site_type = site.get('siteType')
-    lookup_name = 'region-aware-factors.csv'
     lookup_suffix = 'unspecified' if not site_type else ('irri' if site_type in IRRIGATED_SITE_TYPES else 'non_irri')
     column = f"Agg_CF_{lookup_suffix}"
-    value = get_region_lookup_value(lookup_name, region_id, column, model=MODEL, term=TERM_ID)
+    value = get_region_lookup_value(_REGION_LOOKUP, region_id, column, model=MODEL, term=TERM_ID)
     return safe_parse_float(value, default=None)
 
hestia_earth/models/aware2_0/__init__.py

@@ -0,0 +1,14 @@
+import sys
+from importlib import import_module
+from os.path import dirname, abspath
+
+from hestia_earth.models.utils.blank_node import run_if_required
+
+CURRENT_DIR = dirname(abspath(__file__)) + '/'
+sys.path.append(CURRENT_DIR)
+MODEL = 'aware2-0'
+MODEL_FOLDER = MODEL.replace('-', '_')
+PKG = '.'.join(['hestia_earth', 'models', MODEL_FOLDER])
+
+
+def run(model: str, data): return run_if_required(MODEL, model, data, import_module(f".{model}", package=PKG))
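
The new package follows the same name-based dispatch pattern as the other model folders: run('scarcityWeightedWaterUse', data) imports hestia_earth.models.aware2_0.scarcityWeightedWaterUse and executes it through run_if_required. A minimal usage sketch; the impact_assessment dict below is a hypothetical placeholder, not a complete HESTIA node:

# Sketch only: invoking the new AWARE 2.0 indicator model by name.
from hestia_earth.models.aware2_0 import run

impact_assessment = {
    "@type": "ImpactAssessment",
    "site": {"@type": "Site", "awareWaterBasinId": "123"},  # hypothetical basin id
    "emissionsResourceUse": []
}

# expected to return a list of Indicator nodes, or [] when the requirements are not met
indicators = run('scarcityWeightedWaterUse', impact_assessment)
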
hestia_earth/models/aware2_0/scarcityWeightedWaterUse.py

@@ -0,0 +1,115 @@
+from hestia_earth.schema import SiteSiteType
+from hestia_earth.utils.model import find_term_match
+from hestia_earth.utils.lookup import download_lookup, _get_single_table_value, column_name
+from hestia_earth.utils.tools import safe_parse_float
+
+from hestia_earth.models.log import logRequirements, debugMissingLookup, logShouldRun
+from hestia_earth.models.utils import sum_values, multiply_values
+from hestia_earth.models.utils.indicator import _new_indicator
+from hestia_earth.models.utils.impact_assessment import (
+    convert_value_from_cycle, get_product, get_site, get_region_id
+)
+from hestia_earth.models.utils.input import sum_input_impacts
+from hestia_earth.models.utils.lookup import get_region_lookup_value
+from . import MODEL
+
+REQUIREMENTS = {
+    "ImpactAssessment": {
+        "site": {
+            "@type": "Site",
+            "or": {
+                "awareWaterBasinId": "",
+                "country": {"@type": "Term", "termType": "region"}
+            }
+        },
+        "optional": {
+            "emissionsResourceUse": [{
+                "@type": "Indicator",
+                "term.@id": "freshwaterWithdrawalsDuringCycle",
+                "value": ""
+            }]
+        }
+    }
+}
+RETURNS = {
+    "Indicator": [{
+        "value": ""
+    }]
+}
+LOOKUPS = {
+    "@doc": "Different lookup files are used depending on the situation",
+    "awareWaterBasinId-2-0": ["CFs_agri", "CFs_nonagri", "CFs_unspecified"],
+    "region-aware-2-0-factors": ["CFs_agri", "CFs_nonagri", "CFs_unspecified"]
+}
+TERM_ID = 'scarcityWeightedWaterUse'
+AWARE_KEY = 'awareWaterBasinId'
+AGRI_SITE_TYPES = [
+    SiteSiteType.CROPLAND.value,
+    SiteSiteType.GLASS_OR_HIGH_ACCESSIBLE_COVER.value,
+    SiteSiteType.PERMANENT_PASTURE.value
+]
+_REGION_LOOKUP = 'region-aware-2-0-factors.csv'
+_AWARE_LOOKUP = 'awareWaterBasinId-2-0.csv'
+
+
+def _indicator(value: float):
+    indicator = _new_indicator(TERM_ID, MODEL)
+    indicator['value'] = value
+    return indicator
+
+
+def _lookup_column(site: dict):
+    site_type = site.get('siteType')
+    return (
+        'CFs_unspecified' if not site_type else
+        'CFs_agri' if site_type in AGRI_SITE_TYPES else
+        'CFs_nonagri'
+    )
+
+
+def _get_factor_from_basinId(site: dict, aware_id: str):
+    lookup_col = _lookup_column(site)
+    lookup = download_lookup(_AWARE_LOOKUP)
+    try:
+        value = _get_single_table_value(lookup, column_name(AWARE_KEY), int(aware_id), column_name(lookup_col))
+    except Exception:
+        value = None
+    debugMissingLookup(_AWARE_LOOKUP, AWARE_KEY, aware_id, lookup_col, value, model=MODEL, term=TERM_ID)
+    return safe_parse_float(value, default=None)
+
+
+def _get_factor_from_region(impact_assessment: dict, fresh_water: dict, site: dict):
+    region_id = get_region_id(impact_assessment, fresh_water)
+    lookup_col = _lookup_column(site)
+    value = get_region_lookup_value(_REGION_LOOKUP, region_id, lookup_col, model=MODEL, term=TERM_ID)
+    return safe_parse_float(value, default=None)
+
+
+def run(impact_assessment: dict):
+    cycle = impact_assessment.get('cycle', {})
+    product = get_product(impact_assessment)
+    fresh_water = find_term_match(impact_assessment.get('emissionsResourceUse', []), 'freshwaterWithdrawalsDuringCycle')
+    site = get_site(impact_assessment)
+    aware_id = site.get(AWARE_KEY)
+    factor = (
+        _get_factor_from_basinId(site, aware_id) if aware_id else None
+    ) or _get_factor_from_region(impact_assessment, fresh_water, site)
+    inputs_value = convert_value_from_cycle(
+        impact_assessment, product, sum_input_impacts(cycle.get('inputs', []), TERM_ID), model=MODEL, term_id=TERM_ID
+    )
+
+    value = sum_values([
+        multiply_values([fresh_water.get('value'), factor]),
+        inputs_value
+    ])
+
+    logRequirements(impact_assessment, model=MODEL, term=TERM_ID,
+                    fresh_water=fresh_water.get('value'),
+                    aware_id=aware_id,
+                    factor=factor,
+                    inputs_value=inputs_value)
+
+    should_run = all([value is not None])
+    logShouldRun(impact_assessment, MODEL, TERM_ID, should_run)
+
+    return [_indicator(value)] if should_run else []
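
Compared with the AWARE 1.x model, the new module picks the characterisation-factor column from siteType alone (agricultural, non-agricultural, or unspecified) instead of irrigated/non-irrigated. A quick illustration calling the module's private helper, purely for reference; the final site type string is a made-up placeholder:

from hestia_earth.schema import SiteSiteType
from hestia_earth.models.aware2_0.scarcityWeightedWaterUse import _lookup_column

# cropland, glass or high accessible cover and permanent pasture use the agricultural factors
print(_lookup_column({'siteType': SiteSiteType.CROPLAND.value}))  # 'CFs_agri'
# a missing siteType falls back to the unspecified factors
print(_lookup_column({}))                                         # 'CFs_unspecified'
# every other site type uses the non-agricultural factors
print(_lookup_column({'siteType': 'any other site type'}))        # 'CFs_nonagri'
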
hestia_earth/models/config/Cycle.json

@@ -955,7 +955,7 @@
     {
       "key": "emissions",
       "model": "ecoinventV3AndEmberClimate",
-      "value": "all",
+      "value": "cycle",
       "runStrategy": "always",
       "mergeStrategy": "list",
       "mergeArgs": {
@@ -966,7 +966,7 @@
     {
       "key": "emissions",
       "model": "ecoinventV3",
-      "value": "all",
+      "value": "cycle",
       "runStrategy": "always",
       "mergeStrategy": "list",
       "mergeArgs": {
@@ -1469,7 +1469,9 @@
         "replaceThreshold": ["value", 0.01]
       },
       "stage": 2
-    },
+    }
+  ],
+  [
     {
       "key": "emissions",
       "model": "dammgen2009",
hestia_earth/models/config/ImpactAssessment.json

@@ -806,7 +806,7 @@
     },
     {
       "key": "impacts",
-      "model": "aware",
+      "model": "aware2-0",
       "value": "scarcityWeightedWaterUse",
       "runStrategy": "always",
       "mergeStrategy": "list",
hestia_earth/models/config/__init__.py

@@ -1,10 +1,16 @@
 import os
 import json
+from enum import Enum
 from hestia_earth.utils.tools import flatten
 
 CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
 
 
+class AWARE_VERSION(Enum):
+    V1 = '1.2'
+    V2 = '2.0'
+
+
 def _is_aggregated_model(model: dict):
     return isinstance(model, dict) and 'aggregated' in model.get('value', '').lower()
 
@@ -17,12 +23,26 @@ def _remove_aggregated(models: list):
     return list(filter(lambda v: v is not None, values))
 
 
+def _use_aware_1(models: list):
+    return [
+        _use_aware_1(m) if isinstance(m, list) else
+        m | {'model': 'aware'} if m.get('model') == 'aware2-0' else
+        m | {'value': 'awareWaterBasinId_v1'} if m.get('value') == 'awareWaterBasinId' else
+        m
+        for m in models
+    ]
+
+
 def _load_config(filename: str) -> dict:
     with open(os.path.join(CURRENT_DIR, f"{filename}.json"), 'r') as f:
         return json.load(f)
 
 
-def load_config(node_type: str, skip_aggregated_models: bool = False) -> dict:
+def load_config(
+    node_type: str,
+    skip_aggregated_models: bool = False,
+    use_aware_version: AWARE_VERSION = AWARE_VERSION.V2
+) -> dict:
     """
     Load the configuration associated with the Node Type.
 
@@ -32,11 +52,15 @@ def load_config(node_type: str, skip_aggregated_models: bool = False) -> dict:
         The Node Type to load configuration. Can be: `Cycle`, `Site`, `ImpactAssessment`.
     skip_aggregated_models : bool
         Include models using aggregated data. Included by default.
+    use_aware_version : AWARE_VERSION
+        Choose which AWARE version to use. Defaults to using version `2.0`.
     """
     try:
         config = _load_config(node_type)
         models = config.get('models')
-        return config | {'models': _remove_aggregated(models) if skip_aggregated_models else models}
+        models = _remove_aggregated(models) if skip_aggregated_models else models
+        models = _use_aware_1(models) if use_aware_version == AWARE_VERSION.V1 else models
+        return config | {'models': models}
     except FileNotFoundError:
         raise Exception(f"Invalid type {node_type}.")
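
With this change the default configuration resolves to the AWARE 2.0 models, and the extra parameter lets callers rebuild the previous AWARE 1.2 wiring. A minimal sketch of the new option; the comments follow the _use_aware_1 rewrite shown above:

from hestia_earth.models.config import load_config, AWARE_VERSION

# default: AWARE 2.0 ("aware2-0" model, "awareWaterBasinId" value)
config = load_config('ImpactAssessment')

# opt back into AWARE 1.2: entries are rewritten to the "aware" model
# and to the "awareWaterBasinId_v1" value
legacy = load_config('ImpactAssessment', use_aware_version=AWARE_VERSION.V1)
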
hestia_earth/models/cycle/animal/input/hestiaAggregatedData.py

@@ -56,12 +56,12 @@ def _should_run_animal(cycle: dict, animal: dict):
     inputs = list(filter(should_link_input_to_impact(cycle), inputs))
     nb_inputs = len(inputs)
 
-    logRequirements(cycle, model=MODEL_ID, term=term_id, key=MODEL_KEY,
+    logRequirements(cycle, model=MODEL_ID, term=term_id, key=MODEL_KEY, animalId=animal.get('animalId'),
                     end_date=end_date,
                     nb_inputs=nb_inputs)
 
     should_run = all([end_date, nb_inputs > 0])
-    logShouldRun(cycle, MODEL_ID, term_id, should_run, key=MODEL_KEY)
+    logShouldRun(cycle, MODEL_ID, term_id, should_run, key=MODEL_KEY, animalId=animal.get('animalId'))
     return should_run, inputs
 
hestia_earth/models/cycle/animal/input/properties.py

@@ -50,7 +50,7 @@ def _find_related_product(input: dict):
     return find_term_match(products, input.get('term', {}).get('@id'))
 
 
-def _run_input_by_impactAssessment(cycle: dict):
+def _run_input_by_impactAssessment(cycle: dict, **log_args):
     def exec(input: dict):
         term_id = input.get('term', {}).get('@id')
         product = _find_related_product(input)
@@ -58,7 +58,7 @@ def _run_input_by_impactAssessment(cycle: dict):
         all_properties = input.get('properties', [])
         new_properties = [p for p in properties if not find_term_match(all_properties, p.get('term', {}).get('@id'))]
         for prop in new_properties:
-            logShouldRun(cycle, MODEL, term_id, True, property=prop.get('term', {}).get('@id'))
+            logShouldRun(cycle, MODEL, term_id, True, property=prop.get('term', {}).get('@id'), **log_args)
         return {**input, 'properties': merge_blank_nodes(all_properties, new_properties)} if new_properties else input
     return exec
 
@@ -80,9 +80,10 @@ def _run_animal(cycle: dict, animal: dict):
             should_run_properties_value(i)
         ])
     ]
-    inputs = list(map(_run_input_by_impactAssessment(cycle), inputs))
-    inputs = rescale_properties_from_dryMatter(MODEL, cycle, inputs)
-    inputs = average_blank_node_properties_value(cycle, inputs)
+    log_args = {'animalId': animal.get('animalId')}
+    inputs = list(map(_run_input_by_impactAssessment(cycle, **log_args), inputs))
+    inputs = rescale_properties_from_dryMatter(MODEL, cycle, inputs, **log_args)
+    inputs = average_blank_node_properties_value(cycle, inputs, **log_args)
     return animal | {'inputs': inputs}
 
hestia_earth/models/cycle/animal/milkYield.py

@@ -42,12 +42,17 @@ def _run(cycle: dict, animal: dict):
     practices = non_empty_list(
         [p for p in cycle.get('practices', []) if p.get('term', {}).get('@id') in practice_ids]
     )
+    log_args = {
+        'model_key': MODEL_KEY,
+        'animalId': animal.get('animalId')
+    }
 
-    logRequirements(cycle, model=MODEL, term=term_id, model_key=MODEL_KEY,
-                    practice_ids=log_blank_nodes_id(practices))
+    logRequirements(cycle, model=MODEL, term=term_id,
+                    practice_ids=log_blank_nodes_id(practices),
+                    **log_args)
 
     for practice in practices:
-        logShouldRun(cycle, MODEL, practice.get('term', {}).get('@id'), True, model_key=MODEL_KEY)
+        logShouldRun(cycle, MODEL, practice.get('term', {}).get('@id'), True, **log_args)
 
     return {
         **animal,
hestia_earth/models/cycle/utils.py

@@ -11,21 +11,21 @@ def _should_run_property_by_min_max(property: dict):
     ])
 
 
-def _run_property(cycle: dict, property: dict):
+def _run_property(cycle: dict, property: dict, **log_args):
     term_id = property.get('term', {}).get('@id')
 
     should_run = _should_run_property_by_min_max(property)
-    logShouldRun(cycle, MODEL, term_id, should_run, key='value')
+    logShouldRun(cycle, MODEL, term_id, should_run, key='value', **log_args)
 
     return property | ({
         'value': list_average([property.get('min'), property.get('max')])
     } if should_run else {})
 
 
-def _run_properties(cycle: dict, blank_node: dict):
+def _run_properties(cycle: dict, blank_node: dict, **log_args):
     properties = blank_node.get('properties', [])
     return blank_node | ({
-        'properties': [_run_property(cycle, p) for p in properties]
+        'properties': [_run_property(cycle, p, **log_args) for p in properties]
     } if properties else {})
 
 
@@ -33,5 +33,5 @@ def should_run_properties_value(blank_node: dict):
     return any(map(_should_run_property_by_min_max, blank_node.get('properties', [])))
 
 
-def average_blank_node_properties_value(cycle: dict, blank_nodes: list):
-    return [_run_properties(cycle, v) for v in blank_nodes]
+def average_blank_node_properties_value(cycle: dict, blank_nodes: list, **log_args):
+    return [_run_properties(cycle, v, **log_args) for v in blank_nodes]
hestia_earth/models/data/ecoinventV3/__init__.py

@@ -1,36 +1,18 @@
 import os
-from functools import lru_cache
-from hestia_earth.utils.lookup import column_name, get_table_value, load_lookup, lookup_columns
-from hestia_earth.utils.tools import non_empty_list
 
 from hestia_earth.models.log import logger
 
 _CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
-_ENV_NAME = 'ECOINVENT_V3_FILEPATH'
+_ENV_FOLDER = 'ECOINVENT_V3_FOLDER'
+_ECOINVENT_FOLDER = os.getenv(_ENV_FOLDER) or _CURRENT_DIR
+_ECOINVENT_VERSION = os.getenv('ECOINVENT_VERSION', '3.9')
 
 
-@lru_cache()
-def _get_file():
-    filepath = os.getenv(_ENV_NAME, f"{os.path.join(_CURRENT_DIR, 'ecoinventV3_excerpt')}.csv")
-
+def get_filepath(term_type: str):
+    filename = f"ecoinventV{_ECOINVENT_VERSION.replace('.', '_')}-{term_type}.csv"
+    filepath = os.path.join(_ECOINVENT_FOLDER, filename)
     if not os.path.exists(filepath):
-        logger.warning('Ecoinvent file not found. Please make sure to set env variable "%s".', _ENV_NAME)
+        logger.warning('Ecoinvent file not found. Please make sure to set env variable "%s".', _ENV_FOLDER)
         return None
 
-    return load_lookup(filepath=filepath, keep_in_memory=True)
-
-
-def ecoinventV3_emissions(ecoinventName: str):
-    lookup = _get_file()
-    col_name = column_name('ecoinventName')
-
-    def emission(column: str):
-        id = get_table_value(lookup, col_name, ecoinventName, column_name(column))
-        value = get_table_value(lookup, col_name, ecoinventName, column_name(column.replace('termid', 'value')))
-        return (id, value) if id else None
-
-    columns = [
-        col for col in lookup_columns(lookup)
-        if col.endswith(column_name('termid'))
-    ]
-    return non_empty_list(map(emission, columns))
+    return filepath
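
The ecoinvent data is no longer read from a single hard-coded CSV: the path is now built from the ECOINVENT_V3_FOLDER and ECOINVENT_VERSION environment variables plus a term type. A sketch of the resolution logic, with hypothetical folder and term-type values (both variables are read once, when the module is imported):

import os

folder = os.getenv('ECOINVENT_V3_FOLDER', '/data/ecoinvent')  # hypothetical folder
version = os.getenv('ECOINVENT_VERSION', '3.9')               # defaults to 3.9

term_type = 'emission'  # hypothetical term type argument to get_filepath()
filename = f"ecoinventV{version.replace('.', '_')}-{term_type}.csv"

# e.g. /data/ecoinvent/ecoinventV3_9-emission.csv; get_filepath() returns None
# (and logs a warning) when the file does not exist
print(os.path.join(folder, filename))
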
hestia_earth/models/ecoalimV9/cycle.py

@@ -1,17 +1,18 @@
-from functools import reduce
 from statistics import mean
 from hestia_earth.schema import EmissionMethodTier, TermTermType
 from hestia_earth.utils.tools import flatten, list_sum
+from hestia_earth.utils.blank_node import group_by_keys
 
 from hestia_earth.models.log import logShouldRun, logRequirements
 from hestia_earth.models.utils.emission import _new_emission
 from hestia_earth.models.utils.background_emissions import (
     get_background_inputs,
     no_gap_filled_background_emissions,
-    log_missing_emissions
+    log_missing_emissions,
+    parse_term_id,
+    process_input_mappings
 )
-from hestia_earth.models.utils.blank_node import group_by_keys
-from .utils import get_input_mappings, process_input, parse_term_id
+from .utils import get_input_mappings, extract_input_mapping
 from . import MODEL
 
 REQUIREMENTS = {
@@ -51,6 +52,7 @@ RETURNS = {
 }
 LOOKUPS = {
     "ecoalim-emission": "emission-",
+    "emission": "inputProductionGroupId",
     "animalProduct": "ecoalimMapping",
     "crop": "ecoalimMapping",
     "feedFoodAdditive": "ecoalimMapping",
@@ -84,6 +86,13 @@ def _run_input(cycle: dict):
         mappings = get_input_mappings(MODEL, input)
         has_mappings = len(mappings) > 0
 
+        # grouping the inputs together in the logs
+        input_parent_term_id = (input.get('parent', {})).get('@id') or input.get('animalId', {})
+        extra_logs = {
+            **({'input_group_id': input_parent_term_id} if input_parent_term_id else {}),
+            **({'animalId': input.get('animalId')} if input.get('animalId') else {})
+        }
+
         # skip input that has background emissions we have already gap-filled (model run before)
         has_no_gap_filled_background_emissions = no_gap_filled_background_emissions_func(input)
 
@@ -91,15 +100,22 @@
                         has_mappings=has_mappings,
                         mappings=';'.join([v[1] for v in mappings]),
                         has_no_gap_filled_background_emissions=has_no_gap_filled_background_emissions,
-                        input_value=input_value)
+                        input_value=input_value,
+                        **extra_logs)
 
         should_run = all([has_mappings, has_no_gap_filled_background_emissions, input_value])
-        logShouldRun(cycle, MODEL, input_term_id, should_run, methodTier=TIER, model_key=MODEL_KEY)
+        logShouldRun(cycle, MODEL, input_term_id, should_run, methodTier=TIER, model_key=MODEL_KEY, **extra_logs)
 
-        results = process_input(
-            cycle, input, mappings, TermTermType.EMISSION, model_key=MODEL_KEY
+        results = process_input_mappings(
+            cycle, input, mappings, TermTermType.EMISSION,
+            extract_mapping=extract_input_mapping,
+            **(
+                extra_logs | {'model': MODEL, 'model_key': MODEL_KEY}
+            )
         ) if should_run else {}
-        log_missing_emissions_func(input_term_id, list(map(parse_term_id, results.keys())))
+        log_missing_emissions_func(input_term_id, list(map(parse_term_id, results.keys())), **(
+            extra_logs | {'has_mappings': has_mappings}
+        ))
         return [
             _emission(
                 term_id=parse_term_id(term_id),
@@ -115,5 +131,5 @@ def _run_input(cycle: dict):
 
 def run(cycle: dict):
     inputs = get_background_inputs(cycle)
-    grouped_inputs = reduce(group_by_keys(['term', 'operation', 'animal']), inputs, {})
+    grouped_inputs = group_by_keys(inputs, ['term', 'operation', 'animal'])
     return flatten(map(_run_input(cycle), grouped_inputs.values()))
hestia_earth/models/ecoalimV9/impact_assessment.py

@@ -1,13 +1,17 @@
-from functools import reduce
 from statistics import mean
 from hestia_earth.schema import IndicatorMethodTier, TermTermType
 from hestia_earth.utils.tools import flatten, list_sum
+from hestia_earth.utils.blank_node import group_by_keys
 
 from hestia_earth.models.log import logShouldRun, logRequirements
 from hestia_earth.models.utils.indicator import _new_indicator
-from hestia_earth.models.utils.background_emissions import get_background_inputs, log_missing_emissions
-from hestia_earth.models.utils.blank_node import group_by_keys
-from .utils import get_input_mappings, process_input, parse_term_id
+from hestia_earth.models.utils.background_emissions import (
+    get_background_inputs,
+    log_missing_emissions,
+    parse_term_id,
+    process_input_mappings
+)
+from .utils import get_input_mappings, extract_input_mapping
 from . import MODEL
 
 REQUIREMENTS = {
@@ -74,6 +78,8 @@ def _indicator(
         indicator['inputs'] = [input.get('term')]
     if input.get('operation'):
         indicator['operation'] = input.get('operation')
+    if input.get('animal'):
+        indicator['animals'] = [input.get('animal')]
     return indicator
 
 
@@ -89,20 +95,34 @@ def _run_input(impact_assessment: dict):
         mappings = get_input_mappings(MODEL, input)
         has_mappings = len(mappings) > 0
 
+        # grouping the inputs together in the logs
+        input_parent_term_id = (input.get('parent', {})).get('@id') or input.get('animalId', {})
+        extra_logs = {
+            **({'input_group_id': input_parent_term_id} if input_parent_term_id else {}),
+            **({'animalId': input.get('animalId')} if input.get('animalId') else {})
+        }
+
         logRequirements(impact_assessment, model=MODEL, term=input_term_id, model_key=MODEL_KEY,
                         has_mappings=has_mappings,
                         mappings=';'.join([v[1] for v in mappings]),
-                        input_value=input_value)
+                        input_value=input_value,
+                        **extra_logs)
 
         should_run = all([has_mappings, input_value])
         logShouldRun(
-            impact_assessment, MODEL, input_term_id, should_run, methodTier=TIER, model_key=MODEL_KEY
+            impact_assessment, MODEL, input_term_id, should_run, methodTier=TIER, model_key=MODEL_KEY, **extra_logs
        )
 
-        results = process_input(
-            impact_assessment, input, mappings, TermTermType.RESOURCEUSE, model_key=MODEL_KEY
+        results = process_input_mappings(
+            impact_assessment, input, mappings, TermTermType.RESOURCEUSE,
+            extract_mapping=extract_input_mapping,
+            **(
+                extra_logs | {'model': MODEL, 'model_key': MODEL_KEY}
+            )
         ) if should_run else {}
-        log_missing_emissions_func(input_term_id, list(map(parse_term_id, results.keys())))
+        log_missing_emissions_func(input_term_id, list(map(parse_term_id, results.keys())), **(
+            extra_logs | {'has_mappings': has_mappings}
+        ))
         return [
            _indicator(
                term_id=parse_term_id(term_id),
@@ -120,5 +140,5 @@ def _run_input(impact_assessment: dict):
 
 def run(impact_assessment: dict):
     inputs = get_background_inputs(impact_assessment.get('cycle', {}))
-    grouped_inputs = reduce(group_by_keys(['term', 'operation']), inputs, {})
+    grouped_inputs = group_by_keys(inputs, ['term', 'operation', 'animal'])
     return flatten(map(_run_input(impact_assessment), grouped_inputs.values()))