hestia-earth-models 0.75.0__py3-none-any.whl → 0.75.2__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of hestia-earth-models might be problematic.
- hestia_earth/models/aware/scarcityWeightedWaterUse.py +2 -4
- hestia_earth/models/aware2_0/scarcityWeightedWaterUse.py +2 -5
- hestia_earth/models/chaudharyBrooks2018/utils.py +2 -2
- hestia_earth/models/cml2001Baseline/abioticResourceDepletionFossilFuels.py +3 -2
- hestia_earth/models/cml2001Baseline/abioticResourceDepletionMineralsAndMetals.py +13 -6
- hestia_earth/models/config/Cycle.json +15 -0
- hestia_earth/models/cycle/product/economicValueShare.py +4 -4
- hestia_earth/models/ecoalimV9/utils.py +3 -3
- hestia_earth/models/ecoinventV3/utils.py +2 -2
- hestia_earth/models/ecoinventV3AndEmberClimate/utils.py +2 -2
- hestia_earth/models/emissionNotRelevant/__init__.py +2 -2
- hestia_earth/models/environmentalFootprintV3_1/environmentalFootprintSingleOverallScore.py +1 -2
- hestia_earth/models/environmentalFootprintV3_1/soilQualityIndexLandOccupation.py +8 -5
- hestia_earth/models/faostat2018/utils.py +3 -3
- hestia_earth/models/frischknechtEtAl2000/ionisingRadiationKbqU235Eq.py +5 -4
- hestia_earth/models/geospatialDatabase/ecoClimateZone.py +2 -2
- hestia_earth/models/geospatialDatabase/histosol.py +31 -11
- hestia_earth/models/hestia/aboveGroundCropResidueTotal.py +2 -2
- hestia_earth/models/hestia/landCover_utils.py +8 -12
- hestia_earth/models/hestia/management.py +3 -3
- hestia_earth/models/hestia/seed_emissions.py +2 -2
- hestia_earth/models/hestia/soilClassification.py +31 -13
- hestia_earth/models/ipcc2019/animal/pastureGrass.py +3 -1
- hestia_earth/models/ipcc2019/burning_utils.py +406 -4
- hestia_earth/models/ipcc2019/ch4ToAirEntericFermentation.py +12 -9
- hestia_earth/models/ipcc2019/ch4ToAirExcreta.py +28 -10
- hestia_earth/models/ipcc2019/ch4ToAirOrganicSoilCultivation.py +8 -11
- hestia_earth/models/ipcc2019/co2ToAirOrganicSoilCultivation.py +9 -12
- hestia_earth/models/ipcc2019/emissionsToAirOrganicSoilBurning.py +516 -0
- hestia_earth/models/ipcc2019/n2OToAirOrganicSoilCultivationDirect.py +10 -13
- hestia_earth/models/ipcc2019/nonCo2EmissionsToAirNaturalVegetationBurning.py +56 -433
- hestia_earth/models/ipcc2019/organicSoilCultivation_utils.py +2 -2
- hestia_earth/models/ipcc2019/pastureGrass.py +3 -1
- hestia_earth/models/ipcc2019/pastureGrass_utils.py +6 -3
- hestia_earth/models/ipcc2019/utils.py +3 -2
- hestia_earth/models/linkedImpactAssessment/emissions.py +2 -2
- hestia_earth/models/mocking/search-results.json +1 -1
- hestia_earth/models/requirements.py +2 -2
- hestia_earth/models/utils/aggregated.py +2 -2
- hestia_earth/models/utils/background_emissions.py +6 -5
- hestia_earth/models/utils/blank_node.py +68 -0
- hestia_earth/models/utils/ecoClimateZone.py +7 -8
- hestia_earth/models/utils/excretaManagement.py +3 -3
- hestia_earth/models/utils/feedipedia.py +7 -7
- hestia_earth/models/utils/impact_assessment.py +3 -0
- hestia_earth/models/utils/input.py +2 -2
- hestia_earth/models/utils/liveAnimal.py +4 -4
- hestia_earth/models/utils/lookup.py +15 -20
- hestia_earth/models/utils/property.py +3 -3
- hestia_earth/models/utils/term.py +5 -5
- hestia_earth/models/version.py +1 -1
- hestia_earth/orchestrator/models/transformations.py +2 -2
- hestia_earth/orchestrator/strategies/merge/merge_node.py +32 -2
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/METADATA +2 -2
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/RECORD +58 -57
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/WHEEL +0 -0
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/licenses/LICENSE +0 -0
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/top_level.txt +0 -0

hestia_earth/models/aware/scarcityWeightedWaterUse.py

@@ -1,6 +1,6 @@
 from hestia_earth.schema import SiteSiteType
 from hestia_earth.utils.model import find_term_match
-from hestia_earth.utils.lookup import download_lookup,
+from hestia_earth.utils.lookup import download_lookup, get_table_value
 from hestia_earth.utils.tools import safe_parse_float

 from hestia_earth.models.log import logRequirements, debugMissingLookup, logShouldRun
@@ -57,9 +57,7 @@ def _indicator(value: float): return _new_indicator(term=TERM_ID, model=MODEL, v

 def _get_factor_from_basinId(site: dict, aware_id: str):
     lookup_col = 'YR_IRRI' if site.get('siteType') in IRRIGATED_SITE_TYPES else 'YR_NONIRRI'
-    value =
-        download_lookup(_AWARE_LOOKUP), column_name(AWARE_KEY), int(aware_id), column_name(lookup_col)
-    )
+    value = get_table_value(download_lookup(_AWARE_LOOKUP), AWARE_KEY, int(aware_id), lookup_col)
     debugMissingLookup(_AWARE_LOOKUP, AWARE_KEY, aware_id, lookup_col, value, model=MODEL, term=TERM_ID)
     return safe_parse_float(value, default=None)

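A pattern that recurs throughout this release is collapsing the old lookup helpers into a single positional `get_table_value` call followed by `safe_parse_float(..., default=None)`. A minimal sketch of that pattern, with an illustrative lookup filename and column names standing in for the module's `_AWARE_LOOKUP`, `AWARE_KEY` and `YR_IRRI`/`YR_NONIRRI` constants:

    # Sketch only: 'aware.csv' and 'awareBasinId' are placeholders, not the module's constants.
    from hestia_earth.utils.lookup import download_lookup, get_table_value
    from hestia_earth.utils.tools import safe_parse_float

    def basin_factor(aware_id: str, lookup_col: str = 'YR_NONIRRI'):
        lookup = download_lookup('aware.csv')
        # get_table_value(lookup, match_column, match_value, value_column)
        value = get_table_value(lookup, 'awareBasinId', int(aware_id), lookup_col)
        return safe_parse_float(value, default=None)  # None when the cell is missing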

hestia_earth/models/aware2_0/scarcityWeightedWaterUse.py

@@ -1,6 +1,6 @@
 from hestia_earth.schema import SiteSiteType
 from hestia_earth.utils.model import find_term_match
-from hestia_earth.utils.lookup import download_lookup,
+from hestia_earth.utils.lookup import download_lookup, get_table_value
 from hestia_earth.utils.tools import safe_parse_float

 from hestia_earth.models.log import logRequirements, debugMissingLookup, logShouldRun
@@ -67,10 +67,7 @@ def _lookup_column(site: dict):
 def _get_factor_from_basinId(site: dict, aware_id: str):
     lookup_col = _lookup_column(site)
     lookup = download_lookup(_AWARE_LOOKUP)
-
-        value = _get_single_table_value(lookup, column_name(AWARE_KEY), int(aware_id), column_name(lookup_col))
-    except Exception:
-        value = None
+    value = get_table_value(lookup, AWARE_KEY, int(aware_id), lookup_col)
     debugMissingLookup(_AWARE_LOOKUP, AWARE_KEY, aware_id, lookup_col, value, model=MODEL, term=TERM_ID)
     return safe_parse_float(value, default=None)


hestia_earth/models/chaudharyBrooks2018/utils.py

@@ -1,4 +1,4 @@
-from hestia_earth.utils.lookup import download_lookup, get_table_value,
+from hestia_earth.utils.lookup import download_lookup, get_table_value, extract_grouped_data
 from hestia_earth.utils.tools import safe_parse_float

 from hestia_earth.models.log import debugMissingLookup, logRequirements
@@ -8,7 +8,7 @@ from . import MODEL


 def _lookup_value(term_id: str, lookup_name: str, col_match: str, col_val: str, column: str):
-    value = get_table_value(download_lookup(lookup_name), col_match, col_val,
+    value = get_table_value(download_lookup(lookup_name), col_match, col_val, column)
     debugMissingLookup(lookup_name, col_match, col_val, column, value, model=MODEL, term=term_id)
     return value


hestia_earth/models/cml2001Baseline/abioticResourceDepletionFossilFuels.py

@@ -1,5 +1,5 @@
 from itertools import chain
-from hestia_earth.utils.lookup import download_lookup,
+from hestia_earth.utils.lookup import download_lookup, find_term_ids_by
 from hestia_earth.utils.tools import list_sum, flatten

 from hestia_earth.models.log import logShouldRun, logRequirements, log_as_table
@@ -49,7 +49,8 @@ def get_all_non_renewable_terms(lookup_file_name: str, column: str) -> list:
     returns all non renewable term ids in lookup files like `electricity.csv` or `fuel.csv`
     """
     lookup = download_lookup(lookup_file_name)
-
+
+    return find_term_ids_by(lookup, column, True)


 def _valid_resource_indicator(resource: dict) -> bool:

hestia_earth/models/cml2001Baseline/abioticResourceDepletionMineralsAndMetals.py

@@ -1,7 +1,8 @@
 from hestia_earth.schema import TermTermType
-from hestia_earth.utils.lookup import get_table_value, download_lookup
+from hestia_earth.utils.lookup import get_table_value, download_lookup
 from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import list_sum, flatten
+from hestia_earth.utils.lookup import is_missing_value

 from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
 from . import MODEL
@@ -81,16 +82,22 @@ def _should_run(impact_assessment: dict) -> tuple[bool, list]:
             "value": _node_value(resource_indicator),
             "coefficient": get_table_value(
                 lookup=download_lookup(filename=f"{input.get('termType')}.csv"),
-                col_match='
+                col_match='term.id',
                 col_match_with=input.get('@id'),
-                col_val=
+                col_val=LOOKUPS.get(input.get('termType', '')),
+                default_value=None
+            ) if input else None
         } for input in resource_indicator['inputs'] or [{}]]
         for resource_indicator in emissions_resource_use
     ]
     )
-    valid_resources_with_cf = [
-
-
+    valid_resources_with_cf = [
+        em for em in resource_uses_unpacked if all([
+            not is_missing_value(em['coefficient']),
+            em['indicator-is-valid'] is True,
+            em['indicator-input-is-valid'] is True,
+        ])
+    ]

     has_valid_input_requirements = all([
         all([

hestia_earth/models/config/Cycle.json

@@ -1294,6 +1294,21 @@
         },
         "stage": 2
     },
+    {
+        "key": "emissions",
+        "model": "ipcc2019",
+        "value": "emissionsToAirOrganicSoilBurning",
+        "runStrategy": "always",
+        "runArgs": {
+            "runNonMeasured": true,
+            "runNonAddedTerm": true
+        },
+        "mergeStrategy": "list",
+        "mergeArgs": {
+            "replaceThreshold": ["value", 0.01]
+        },
+        "stage": 2
+    },
     {
         "key": "emissions",
         "model": "schmidt2007",

hestia_earth/models/cycle/product/economicValueShare.py

@@ -1,5 +1,5 @@
 from hestia_earth.utils.model import find_term_match
-from hestia_earth.utils.tools import list_sum
+from hestia_earth.utils.tools import list_sum, to_precision

 from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
 from hestia_earth.models.utils.term import get_lookup_value
@@ -43,7 +43,7 @@ MIN_COMPLETE_VALUE = 80 # when the products are complete lower the min threshol


 def _product(product: dict, value: float):
-    return product | {MODEL_KEY: value}
+    return product | {MODEL_KEY: to_precision(value, 2 if value < 1 else 3 if value < 10 else 4)}


 def _is_complete(cycle: dict): return cycle.get('completeness', {}).get('product', False)
@@ -57,11 +57,11 @@ def _total_evs(products: list): return sum([p.get(MODEL_KEY, 0) for p in product

 def _product_with_value(product: dict):
     value = product.get(MODEL_KEY, lookup_share(MODEL_KEY, product))
-    return product
+    return _product(product, value) if value is not None else product


 def _rescale_value(products: list, total: float):
-    return list(map(lambda p:
+    return list(map(lambda p: _product(p, p.get(MODEL_KEY) * 100 / total), products))


 def _should_run_by_default(cycle: dict, products: list):
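The `to_precision` rule introduced in `_product` above ties the number of retained digits to the magnitude of the economic value share. A rough illustration, assuming `to_precision(value, n)` from `hestia_earth.utils.tools` rounds to `n` significant figures:

    # Illustration of the conditional used in _product; outputs are approximate.
    from hestia_earth.utils.tools import to_precision

    for value in (0.123456, 7.891234, 56.78912):
        digits = 2 if value < 1 else 3 if value < 10 else 4
        print(value, '->', to_precision(value, digits))
    # roughly: 0.12, 7.89, 56.79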

hestia_earth/models/ecoalimV9/utils.py

@@ -1,13 +1,13 @@
 import os
 from functools import lru_cache
 from hestia_earth.schema import TermTermType
-from hestia_earth.utils.lookup import download_lookup, get_table_value
+from hestia_earth.utils.lookup import download_lookup, get_table_value
 from hestia_earth.utils.tools import non_empty_list, safe_parse_float, omit

 from hestia_earth.models.utils.term import get_lookup_value
 from hestia_earth.models.utils.background_emissions import convert_background_lookup

-_LOOKUP_INDEX_KEY =
+_LOOKUP_INDEX_KEY = 'ecoalimMappingName'


 def get_input_mappings(model: str, input: dict):
@@ -58,7 +58,7 @@ def _cutoff_id(term_id: str, country_id: str = None, key_id: str = None):
 def cutoff_value(cutoff_lookup, term_id: str, country_id: str = None, key_id: str = None):
     cutoff_id = _cutoff_id(term_id=term_id, country_id=country_id, key_id=key_id)
     return None if cutoff_lookup is None else safe_parse_float(
-        get_table_value(cutoff_lookup, '
+        get_table_value(cutoff_lookup, 'term.id', cutoff_id, 'percentage'),
         default=None
     )


hestia_earth/models/ecoinventV3/utils.py

@@ -1,13 +1,13 @@
 from functools import lru_cache
 from hestia_earth.schema import TermTermType
-from hestia_earth.utils.lookup import
+from hestia_earth.utils.lookup import load_lookup
 from hestia_earth.utils.tools import non_empty_list

 from hestia_earth.models.data.ecoinventV3 import get_filepath
 from hestia_earth.models.utils.term import get_lookup_value
 from hestia_earth.models.utils.background_emissions import convert_background_lookup

-_LOOKUP_INDEX_KEY =
+_LOOKUP_INDEX_KEY = 'ecoinventName'


 def get_input_mappings(model: str, input: dict):

hestia_earth/models/ecoinventV3AndEmberClimate/utils.py

@@ -1,4 +1,4 @@
-from hestia_earth.utils.lookup import download_lookup, get_table_value,
+from hestia_earth.utils.lookup import download_lookup, get_table_value, extract_grouped_data
 from hestia_earth.utils.tools import safe_parse_float

 from hestia_earth.models.utils.cycle import cycle_end_year
@@ -14,7 +14,7 @@ def get_input_coefficient(model: str, cycle: dict, country_id: str, ecoinventNam
     # find the matching ember source with the ecoinventName.
     # example: "electricity, high voltage, electricity production, hard coal" > "Coal"
     ember_ecoinvent_lookup = download_lookup(EMBER_ECOINVENT_LOOKUP_NAME)
-    source_name = get_table_value(ember_ecoinvent_lookup,
+    source_name = get_table_value(ember_ecoinvent_lookup, 'ecoinventName', ecoinventName, 'ember')

     # find the ratio for the country / year
     data = get_region_lookup_value(REGION_EMBER_SOURCES_LOOKUP_NAME, country_id, source_name, model=model)

hestia_earth/models/emissionNotRelevant/__init__.py

@@ -1,5 +1,5 @@
 from hestia_earth.schema import NodeType, EmissionMethodTier, TermTermType
-from hestia_earth.utils.lookup import download_lookup, lookup_term_ids, lookup_columns
+from hestia_earth.utils.lookup import download_lookup, lookup_term_ids, lookup_columns
 from hestia_earth.utils.lookup_utils import is_in_system_boundary
 from hestia_earth.utils.tools import flatten

@@ -49,7 +49,7 @@ def _emission_ids():
 def _model_ids(lookup_suffix: str):
     return [
         col for col in lookup_columns(download_lookup(f"emission-model-{lookup_suffix}.csv"))
-        if col !=
+        if col != 'term.id'
     ]



hestia_earth/models/environmentalFootprintV3_1/environmentalFootprintSingleOverallScore.py

@@ -53,8 +53,7 @@ def _is_a_PEF_indicator(indicator: dict) -> bool:


 def _get_factor(term: dict, column: str) -> Optional[float]:
-
-    return float(factor) if factor is not None else None
+    return get_lookup_value(term, column, model=MODEL, term=TERM_ID)


 def _get_pef_method_model(term: dict) -> List[str]:

hestia_earth/models/environmentalFootprintV3_1/soilQualityIndexLandOccupation.py

@@ -1,8 +1,8 @@
 from typing import Tuple
-
 from hestia_earth.schema import TermTermType
 from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import list_sum
+from hestia_earth.utils.lookup import is_missing_value

 from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
 from hestia_earth.models.utils.indicator import _new_indicator
@@ -86,10 +86,13 @@ def _should_run(impact_assessment: dict) -> Tuple[bool, list]:
         for indicator in found_land_occupation_indicators
     ]) if found_land_occupation_indicators else False

-    valid_indicator_with_coef = [
-
-
-
+    valid_indicator_with_coef = [
+        indicator for indicator in found_indicators_with_coefficient if all([
+            not is_missing_value(indicator['coefficient']),
+            indicator['area-by-year-is-valid'],
+            indicator['area-unit-is-valid']
+        ])
+    ]

     has_land_occupation_indicators = bool(land_occupation_indicators)


hestia_earth/models/faostat2018/utils.py

@@ -1,6 +1,6 @@
 from hestia_earth.schema import TermTermType
 from hestia_earth.utils.api import download_hestia
-from hestia_earth.utils.lookup import download_lookup, get_table_value,
+from hestia_earth.utils.lookup import download_lookup, get_table_value, extract_grouped_data_closest_date
 from hestia_earth.utils.tools import safe_parse_float, flatten, non_empty_list

 from hestia_earth.models.log import logger, debugMissingLookup, logRequirements, logShouldRun, debugValues, log_as_table
@@ -20,8 +20,8 @@ FAOSTAT_AREA_LOOKUP = 'region-faostatArea.csv'
 def get_liveAnimal_to_animalProduct_id(product_term_id: str, column: str, **log_args):
     lookup_name = 'liveAnimal.csv'
     lookup = download_lookup(lookup_name)
-    value = get_table_value(lookup, '
-    debugMissingLookup(lookup_name, '
+    value = get_table_value(lookup, 'term.id', product_term_id, column)
+    debugMissingLookup(lookup_name, 'term.id', product_term_id, column, value, model=MODEL, **log_args)
     return value



hestia_earth/models/frischknechtEtAl2000/ionisingRadiationKbqU235Eq.py

@@ -1,5 +1,5 @@
 from hestia_earth.schema import TermTermType
-from hestia_earth.utils.lookup import get_table_value, download_lookup,
+from hestia_earth.utils.lookup import get_table_value, download_lookup, is_missing_value
 from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import flatten, list_sum, omit, pick, unique_values
 from hestia_earth.utils.blank_node import group_by_keys
@@ -82,9 +82,10 @@ def _should_run(impact_assessment: dict) -> tuple[bool, list]:
             "value": _node_value(emission),
             "coefficient": get_table_value(
                 lookup=download_lookup(filename="waste.csv"),
-                col_match='
+                col_match='term.id',
                 col_match_with=emission['key'].get('@id'),
-                col_val=
+                col_val=emission['term']['@id'],
+                default_value=None
             )
         } | pick(emission, ['key', 'inputs'])
         for emission in emissions
@@ -93,7 +94,7 @@ def _should_run(impact_assessment: dict) -> tuple[bool, list]:

     valid_emission_with_cf = [
         em for em in emissions_unpacked if all([
-            em['coefficient']
+            not is_missing_value(em['coefficient']),
             em['key-is-valid'] is True
         ])
     ]

hestia_earth/models/geospatialDatabase/ecoClimateZone.py

@@ -1,5 +1,5 @@
 from hestia_earth.schema import MeasurementMethodClassification
-from hestia_earth.utils.lookup import download_lookup,
+from hestia_earth.utils.lookup import download_lookup, get_table_value

 from hestia_earth.models.log import logRequirements, logShouldRun
 from hestia_earth.models.utils.measurement import _new_measurement
@@ -44,7 +44,7 @@ def _measurement(site: dict, value: int):

 def _name(value: int):
     lookup = download_lookup(f"{TERM_ID}.csv")
-    return
+    return get_table_value(lookup, TERM_ID, value, 'name')


 def _run(site: dict):

hestia_earth/models/geospatialDatabase/histosol.py

@@ -1,7 +1,10 @@
-from hestia_earth.schema import MeasurementMethodClassification
+from hestia_earth.schema import MeasurementMethodClassification, TermTermType
+from hestia_earth.utils.model import filter_list_term_type
+from hestia_earth.utils.blank_node import get_node_value
+from hestia_earth.utils.tools import pick

-from hestia_earth.models.log import logRequirements, logShouldRun
-from hestia_earth.models.utils.measurement import _new_measurement
+from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
+from hestia_earth.models.utils.measurement import _new_measurement
 from hestia_earth.models.utils.source import get_source
 from .utils import download, has_geospatial_data, should_download
 from . import MODEL
@@ -16,10 +19,10 @@ REQUIREMENTS = {
         "none": {
             "measurements": [{
                 "@type": "Measurement",
-                "value": "
+                "value": "> 0",
                 "depthUpper": "0",
                 "depthLower": "30",
-                "term.termType": "soilType"
+                "term.termType": ["soilType", "usdaSoilType"]
             }]
         }
     }
@@ -39,12 +42,14 @@ EE_PARAMS = {
     'reducer': 'mean'
 }
 BIBLIO_TITLE = 'Harmonized World Soil Database Version 1.2. Food and Agriculture Organization of the United Nations (FAO).'  # noqa: E501
+_DEPTH_UPPER = 0
+_DEPTH_LOWER = 30


 def _measurement(site: dict, value: float):
     measurement = _new_measurement(term=TERM_ID, value=value)
-    measurement['depthUpper'] =
-    measurement['depthLower'] =
+    measurement['depthUpper'] = _DEPTH_UPPER
+    measurement['depthLower'] = _DEPTH_LOWER
     measurement['methodClassification'] = MeasurementMethodClassification.GEOSPATIAL_DATASET.value
     return measurement | get_source(site, BIBLIO_TITLE)

@@ -58,15 +63,30 @@ def _should_run(site: dict):
     contains_geospatial_data = has_geospatial_data(site)
     below_max_area_size = should_download(TERM_ID, site)

-
+    measurements = filter_list_term_type(site.get('measurements', []), [
+        TermTermType.SOILTYPE,
+        TermTermType.USDASOILTYPE
+    ])
+    measurements = [m for m in measurements if all([
+        m.get('depthUpper', -1) == _DEPTH_UPPER,
+        m.get('depthLower', 0) == _DEPTH_LOWER,
+        get_node_value(m) > 0
+    ])]
+    has_soil_type_measurements = len(measurements) > 0

     logRequirements(site, model=MODEL, term=TERM_ID,
                     contains_geospatial_data=contains_geospatial_data,
                     below_max_area_size=below_max_area_size,
-
-
+                    has_soil_type_measurements=has_soil_type_measurements,
+                    soil_type_measurements=log_as_table([
+                        {
+                            'id': m.get('term', {}).get('@id'),
+                            'termType': m.get('term', {}).get('termType'),
+                            'value': get_node_value(m)
+                        } | pick(m, ['depthUpper', 'depthLower']) for m in measurements
+                    ]))

-    should_run = all([contains_geospatial_data, below_max_area_size,
+    should_run = all([contains_geospatial_data, below_max_area_size, not has_soil_type_measurements])
     logShouldRun(site, MODEL, TERM_ID, should_run)
     return should_run

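The new `_should_run` guard above only lets the geospatial histosol model run when the site has no usable soil-type measurement for the 0-30 cm layer. A standalone sketch of that predicate, using a plain dict in place of a HESTIA Measurement node and a simplified value accessor (both assumptions, not the module's helpers):

    # Sketch of the guard added above: a soilType/usdaSoilType measurement covering
    # 0-30 cm with a value above zero suppresses the geospatial download.
    _DEPTH_UPPER, _DEPTH_LOWER = 0, 30

    def _node_value(measurement: dict) -> float:
        # simplified stand-in for hestia_earth.utils.blank_node.get_node_value
        value = measurement.get('value', [])
        return value[0] if isinstance(value, list) and value else (value or 0)

    def has_soil_type_measurements(site: dict) -> bool:
        return any(
            m.get('term', {}).get('termType') in ('soilType', 'usdaSoilType')
            and m.get('depthUpper', -1) == _DEPTH_UPPER
            and m.get('depthLower', 0) == _DEPTH_LOWER
            and _node_value(m) > 0
            for m in site.get('measurements', [])
        )

    site = {'measurements': [{
        'term': {'@id': 'histosols', 'termType': 'usdaSoilType'},  # hypothetical term id
        'value': [100], 'depthUpper': 0, 'depthLower': 30
    }]}
    assert has_soil_type_measurements(site)  # histosol would not run for this site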

hestia_earth/models/hestia/aboveGroundCropResidueTotal.py

@@ -9,8 +9,8 @@ from . import MODEL

 REQUIREMENTS = {
     "Cycle": {
-        "practices": [{"@type": "Practice", "value": "", "term.termType": "
-        "products": [{"@type": "Product", "value": "", "term.termType": "
+        "practices": [{"@type": "Practice", "value": "", "term.termType": "cropResidueManagement"}],
+        "products": [{"@type": "Product", "value": "", "term.termType": "cropResidue"}]
     }
 }
 RETURNS = {

hestia_earth/models/hestia/landCover_utils.py

@@ -3,7 +3,7 @@ from functools import lru_cache
 from collections import defaultdict
 from hestia_earth.schema import TermTermType
 from hestia_earth.utils.lookup import (
-    download_lookup, get_table_value,
+    download_lookup, get_table_value, is_missing_value, extract_grouped_data, lookup_columns, lookup_term_ids
 )
 from hestia_earth.utils.tools import safe_parse_float

@@ -57,10 +57,6 @@ def _get_lookup_with_cache(lookup_term: dict, column: str):
 def _get_faostat_name(term: dict) -> str: return _get_lookup_with_cache(term, "cropGroupingFaostatArea")


-def _is_missing_or_none(value) -> bool:
-    return value is None or _is_missing_value(value)
-
-
 def _safe_divide(numerator, denominator, default=0) -> float:
     return default if not denominator else numerator / denominator

@@ -354,15 +350,15 @@ def _get_most_common_or_alphabetically_first(crop_terms: list) -> str:
     return sorted([term for term, freq in histogram.items() if freq == max_freq])[0]


+@lru_cache()
 def _get_complete_faostat_to_crop_mapping() -> dict:
     """Returns mapping in the format: {faostat_name: IPCC_LAND_USE_CATEGORY, ...}"""
     term_type = TermTermType.CROP.value
     lookup = download_lookup(f"{term_type}.csv")
+    term_ids = lookup_term_ids(lookup)
     mappings = defaultdict(list)
-    for crop_term_id in
-        key =
-            get_table_value(lookup, 'termid', crop_term_id, column_name("cropGroupingFaostatArea"))
-        )
+    for crop_term_id in term_ids:
+        key = get_table_value(lookup, 'term.id', crop_term_id, "cropGroupingFaostatArea")
         if key:
             mappings[key].append(crop_ipcc_land_use_category(crop_term_id=crop_term_id, lookup_term_type="crop"))
     return {
@@ -409,7 +405,7 @@ def _get_sum_for_land_category(
     return sum([
         safe_parse_float(value=extract_grouped_data(table_value, str(year)), default=None)
         for fao_name, table_value in values.items()
-        if not
+        if not is_missing_value(extract_grouped_data(table_value, str(year))) and
         fao_stat_to_ipcc_type[fao_name] == ipcc_land_use_category and
         (
             include_negatives or
@@ -426,8 +422,8 @@ def _get_sums_of_crop_expansion(country_id: str, year: int, include_negatives: b
     lookup = get_region_lookup(lookup_name=_LOOKUP_EXPANSION, term_id=country_id)
     columns = lookup_columns(lookup)
     values = {
-        name: get_table_value(lookup, '
-        for name in columns if name !=
+        name: get_table_value(lookup, 'term.id', country_id, name)
+        for name in columns if name != 'term.id'
     }

     fao_stat_to_ipcc_type = _get_complete_faostat_to_crop_mapping()

hestia_earth/models/hestia/management.py

@@ -1,7 +1,7 @@
 from typing import List
 from datetime import timedelta, datetime
 from hestia_earth.schema import SchemaType, TermTermType, COMPLETENESS_MAPPING
-from hestia_earth.utils.lookup import
+from hestia_earth.utils.lookup import download_lookup, get_table_value
 from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import safe_parse_float, flatten, is_number, is_boolean, pick
 from hestia_earth.utils.blank_node import get_node_value
@@ -117,9 +117,9 @@ def _get_cycle_duration(cycle: dict, land_cover_id: str = None):
     cycle_duration = cycle.get('cycleDuration')
     lookup_value = None if cycle_duration or not land_cover_id else safe_parse_float(get_table_value(
         download_lookup("crop.csv"),
-
+        'landCoverTermId',
         land_cover_id,
-
+        'maximumCycleDuration'
     ), default=None)
     return cycle_duration or lookup_value


hestia_earth/models/hestia/seed_emissions.py

@@ -1,7 +1,7 @@
 from functools import reduce
 from hestia_earth.schema import TermTermType, EmissionMethodTier, SiteSiteType
 from hestia_earth.utils.lookup import (
-    download_lookup,
+    download_lookup, extract_grouped_data_closest_date, find_term_ids_by
 )
 from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import non_empty_list, flatten, list_sum, safe_parse_float, omit
@@ -103,7 +103,7 @@ def _map_group_emissions(group_id: str, required_emission_term_ids: list, emissi
     lookup = download_lookup('emission.csv')
     emissions = list(filter(
         lambda id: id in required_emission_term_ids,
-        find_term_ids_by(lookup,
+        find_term_ids_by(lookup, 'inputProductionGroupId', group_id)
     ))
     included_emissions = list(filter(lambda v: v in emission_ids, emissions))
     missing_emissions = list(filter(lambda v: v not in emission_ids, emissions))

hestia_earth/models/hestia/soilClassification.py

@@ -19,16 +19,18 @@ from . import MODEL
 REQUIREMENTS = {
     "Site": {
         "optional": {
-            "measurements": [
-
-
-
-
-
-
-                "
+            "measurements": [
+                {
+                    "@type": "Measurement",
+                    "value": "",
+                    "depthUpper": "",
+                    "depthLower": "",
+                    "term.termType": ["soilType", "usdaSoilType"],
+                    "optional": {
+                        "dates": ""
+                    }
                 }
-
+            ]
         }
     }
 }
@@ -41,7 +43,8 @@ RETURNS = {
     }]
 }
 LOOKUPS = {
-    "soilType": "IPCC_SOIL_CATEGORY"
+    "soilType": "IPCC_SOIL_CATEGORY",
+    "usdaSoilType": "IPCC_SOIL_CATEGORY"
 }
 TERM_ID = 'organicSoils,mineralSoils'

@@ -50,6 +53,10 @@ ORGANIC_SOILS_TERM_ID = MEASUREMENT_TERM_IDS[0]
 MINERAL_SOILS_TERM_ID = MEASUREMENT_TERM_IDS[1]
 METHOD = MeasurementMethodClassification.MODELLED_USING_OTHER_MEASUREMENTS.value

+_INPUT_TERM_TYPES = (
+    TermTermType.SOILTYPE,
+    TermTermType.USDASOILTYPE
+)
 TARGET_LOOKUP_VALUE = IPCC_SOIL_CATEGORY_TO_SOIL_TYPE_LOOKUP_VALUE[IpccSoilCategory.ORGANIC_SOILS]

 IS_100_THRESHOLD = 99.5
@@ -65,6 +72,7 @@ def _measurement(term_id: str, **kwargs):

 class _SoilTypeDatum(NamedTuple):
     term_id: str
+    term_type: str
     depth_upper: float
     depth_lower: float
     dates: list[str]
@@ -105,14 +113,16 @@ def _extract_soil_type_data(node: dict) -> _SoilTypeDatum:
     depth_upper = node.get("depthUpper")
     depth_lower = node.get("depthLower")
     depth_interval = (depth_upper, depth_lower)
+    term_type = node.get("term", {}).get("termType")

     return _SoilTypeDatum(
         term_id=node.get("term", {}).get("@id"),
+        term_type=term_type,
         depth_upper=depth_upper,
         depth_lower=depth_lower,
         dates=node.get("dates", []),
         value=get_node_value(node),
-        is_organic=node_lookup_match(node, LOOKUPS[
+        is_organic=node_lookup_match(node, LOOKUPS[term_type], TARGET_LOOKUP_VALUE),
         is_complete_depth=all(depth is not None for depth in depth_interval),
         is_standard_depth=depth_interval in STANDARD_DEPTHS,
     )
@@ -126,7 +136,7 @@ def _classify_soil_type_data(soil_type_data: list[_SoilTypeDatum]):

     def classify(inventory: _SoilTypeInventory, datum: _SoilTypeDatum) -> _SoilTypeInventory:
         """
-        Sum the values of organic and mineral `soilType` Measurements by depth interval and date.
+        Sum the values of organic and mineral `soilType`/`usdaSoilType` Measurements by depth interval and date.
         """
         keys = _soil_type_data_to_inventory_keys(datum)

@@ -252,7 +262,7 @@ def _filter_data_by_depth_availability(data: list[_SoilTypeDatum]):

 def _should_run(site: dict):
     soil_type_nodes = split_nodes_by_dates(
-
+        get_soil_type_nodes(site)
     )

     filtered_by, soil_type_data = _filter_data_by_depth_availability(
@@ -280,6 +290,14 @@ def _should_run(site: dict):
     return should_run, inventory


+def get_soil_type_nodes(site: dict) -> list[dict]:
+    measurements = site.get("measurements", [])
+    return next(
+        (nodes for term_type in _INPUT_TERM_TYPES if (nodes := filter_list_term_type(measurements, term_type))),
+        []
+    )
+
+
 _INVENTORY_KEY_TO_FIELD_KEY = {
     "depth_upper": "depthUpper",
     "depth_lower": "depthLower",