hestia-earth-models 0.75.0__py3-none-any.whl → 0.75.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hestia_earth/models/aware/scarcityWeightedWaterUse.py +2 -4
- hestia_earth/models/aware2_0/scarcityWeightedWaterUse.py +2 -5
- hestia_earth/models/chaudharyBrooks2018/utils.py +2 -2
- hestia_earth/models/cml2001Baseline/abioticResourceDepletionFossilFuels.py +3 -2
- hestia_earth/models/cml2001Baseline/abioticResourceDepletionMineralsAndMetals.py +13 -6
- hestia_earth/models/config/Cycle.json +15 -0
- hestia_earth/models/cycle/product/economicValueShare.py +4 -4
- hestia_earth/models/ecoalimV9/utils.py +3 -3
- hestia_earth/models/ecoinventV3/utils.py +2 -2
- hestia_earth/models/ecoinventV3AndEmberClimate/utils.py +2 -2
- hestia_earth/models/emissionNotRelevant/__init__.py +2 -2
- hestia_earth/models/environmentalFootprintV3_1/environmentalFootprintSingleOverallScore.py +1 -2
- hestia_earth/models/environmentalFootprintV3_1/soilQualityIndexLandOccupation.py +8 -5
- hestia_earth/models/faostat2018/utils.py +3 -3
- hestia_earth/models/frischknechtEtAl2000/ionisingRadiationKbqU235Eq.py +5 -4
- hestia_earth/models/geospatialDatabase/ecoClimateZone.py +2 -2
- hestia_earth/models/geospatialDatabase/histosol.py +31 -11
- hestia_earth/models/hestia/aboveGroundCropResidueTotal.py +2 -2
- hestia_earth/models/hestia/landCover_utils.py +8 -12
- hestia_earth/models/hestia/management.py +3 -3
- hestia_earth/models/hestia/seed_emissions.py +2 -2
- hestia_earth/models/hestia/soilClassification.py +31 -13
- hestia_earth/models/ipcc2019/animal/pastureGrass.py +3 -1
- hestia_earth/models/ipcc2019/burning_utils.py +406 -4
- hestia_earth/models/ipcc2019/ch4ToAirEntericFermentation.py +12 -9
- hestia_earth/models/ipcc2019/ch4ToAirExcreta.py +28 -10
- hestia_earth/models/ipcc2019/ch4ToAirOrganicSoilCultivation.py +8 -11
- hestia_earth/models/ipcc2019/co2ToAirOrganicSoilCultivation.py +9 -12
- hestia_earth/models/ipcc2019/emissionsToAirOrganicSoilBurning.py +516 -0
- hestia_earth/models/ipcc2019/n2OToAirOrganicSoilCultivationDirect.py +10 -13
- hestia_earth/models/ipcc2019/nonCo2EmissionsToAirNaturalVegetationBurning.py +56 -433
- hestia_earth/models/ipcc2019/organicSoilCultivation_utils.py +2 -2
- hestia_earth/models/ipcc2019/pastureGrass.py +3 -1
- hestia_earth/models/ipcc2019/pastureGrass_utils.py +6 -3
- hestia_earth/models/ipcc2019/utils.py +3 -2
- hestia_earth/models/linkedImpactAssessment/emissions.py +2 -2
- hestia_earth/models/mocking/search-results.json +1 -1
- hestia_earth/models/requirements.py +2 -2
- hestia_earth/models/utils/aggregated.py +2 -2
- hestia_earth/models/utils/background_emissions.py +6 -5
- hestia_earth/models/utils/blank_node.py +68 -0
- hestia_earth/models/utils/ecoClimateZone.py +7 -8
- hestia_earth/models/utils/excretaManagement.py +3 -3
- hestia_earth/models/utils/feedipedia.py +7 -7
- hestia_earth/models/utils/impact_assessment.py +3 -0
- hestia_earth/models/utils/input.py +2 -2
- hestia_earth/models/utils/liveAnimal.py +4 -4
- hestia_earth/models/utils/lookup.py +15 -20
- hestia_earth/models/utils/property.py +3 -3
- hestia_earth/models/utils/term.py +5 -5
- hestia_earth/models/version.py +1 -1
- hestia_earth/orchestrator/models/transformations.py +2 -2
- hestia_earth/orchestrator/strategies/merge/merge_node.py +32 -2
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/METADATA +2 -2
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/RECORD +58 -57
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/WHEEL +0 -0
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/licenses/LICENSE +0 -0
- {hestia_earth_models-0.75.0.dist-info → hestia_earth_models-0.75.2.dist-info}/top_level.txt +0 -0
@@ -4,7 +4,7 @@ from functools import reduce
 from importlib import import_module
 from pydash.objects import merge, merge_with, omit
 from hestia_earth.schema import is_schema_type, is_type_valid, NodeType
-from hestia_earth.utils.lookup import download_lookup, get_table_value
+from hestia_earth.utils.lookup import download_lookup, get_table_value
 from hestia_earth.utils.api import download_hestia
 from hestia_earth.utils.tools import flatten, non_empty_list

@@ -87,7 +87,7 @@ ALL_MODELS = _models(CURRENT_DIR)
 def _lookup_allowed(model_term_id: str, column: str, value: str):
     term = download_hestia(model_term_id)
     lookup = download_lookup(f"{term.get('termType')}.csv")
-    values = get_table_value(lookup, '
+    values = get_table_value(lookup, 'term.id', model_term_id, column)
     allowed_values = (values or 'all').split(';')
     return 'all' in allowed_values or value in allowed_values
@@ -1,6 +1,6 @@
 import math
 from hestia_earth.schema import NodeType
-from hestia_earth.utils.lookup import download_lookup, get_table_value
+from hestia_earth.utils.lookup import download_lookup, get_table_value
 from hestia_earth.utils.model import find_term_match, linked_node
 from hestia_earth.utils.tools import safe_parse_date, non_empty_list

@@ -52,7 +52,7 @@ def link_inputs_to_impact(model: str, cycle: dict, inputs: list, **log_args):

 def _should_not_skip_input(term: dict):
     lookup = download_lookup(f"{term.get('termType')}.csv", True)
-    value = get_table_value(lookup, '
+    value = get_table_value(lookup, 'term.id', term.get('@id'), 'skipLinkedImpactAssessment')
     return True if value is None or value == '' else not value
@@ -1,7 +1,7 @@
 from functools import reduce
 from typing import Callable, Tuple
 from hestia_earth.schema import TermTermType, EmissionMethodTier
-from hestia_earth.utils.lookup import
+from hestia_earth.utils.lookup import is_missing_value, lookup_columns
 from hestia_earth.utils.model import find_term_match, filter_list_term_type
 from hestia_earth.utils.tools import flatten, non_empty_list, safe_parse_float, omit
 from hestia_earth.utils.emission import cycle_emissions_in_system_boundary

@@ -121,19 +121,20 @@ def _values_from_column(index_column: str, column: str, value: str):
         column != index_column,
         not column.startswith('ecoinvent'),
         not column.startswith('ecoalim'),
-        not
+        not is_missing_value(value)
     ]) else {}


 def convert_background_lookup(lookup, index_column: str):
     columns = lookup_columns(lookup)
+    indexed_df = lookup.set_index(index_column, drop=False).copy()
     return {
-
-        lambda prev, curr: prev | _values_from_column(index_column, curr,
+        index_key: reduce(
+            lambda prev, curr: prev | _values_from_column(index_column, curr, row_data[curr]),
             columns,
             {}
         )
-        for
+        for index_key, row_data in indexed_df.to_dict('index').items()
     }
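Illustration (not part of the diff): the reworked convert_background_lookup indexes the lookup table by index_column and collapses each row into a plain dict, keeping only columns that pass _values_from_column (not the index column, not ecoinvent/ecoalim, not missing). A minimal sketch with hypothetical column names, assuming the lookup behaves like a pandas DataFrame as the set_index / to_dict('index') calls suggest:

import pandas as pd

# hypothetical two-row lookup; real tables come from download_lookup(...)
lookup = pd.DataFrame({
    'term.id': ['termA', 'termB'],
    'co2ToAirFuelCombustion': [0.5, 1.2],  # hypothetical factor column
    'ecoinventMapping': ['a', 'b'],  # skipped by _values_from_column
})

# convert_background_lookup(lookup, 'term.id') would return roughly:
# {'termA': {'co2ToAirFuelCombustion': 0.5},
#  'termB': {'co2ToAirFuelCombustion': 1.2}}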
@@ -16,6 +16,7 @@ from dateutil import parser
 from dateutil.relativedelta import relativedelta
 from hestia_earth.schema import TermTermType
 from hestia_earth.utils.blank_node import ArrayTreatment, get_node_value
+from hestia_earth.utils.date import diff_in_days
 from hestia_earth.utils.model import filter_list_term_type
 from hestia_earth.utils.tools import (
     flatten,

@@ -532,6 +533,10 @@ def node_term_match(
     return node.get('term', {}).get('@id', None) in target_term_ids


+def filter_list_term_id(nodes: list[dict], target_term_ids: Union[str, set[str]]) -> list[dict]:
+    return [node for node in nodes if node_term_match(node, target_term_ids)]
+
+
 def node_lookup_match(
     node: dict,
     lookup: str,
@@ -1654,3 +1659,66 @@ def validate_start_date_end_date(node: dict) -> bool:
     end_date = _gapfill_datestr(node.get("endDate", OLDEST_DATE), DatestrGapfillMode.END)

     return safe_parse_date(start_date) < safe_parse_date(end_date)
+
+
+def closest_depth(
+    nodes: list[dict], target_depth_upper: float, target_depth_lower: float, depth_strict: bool = True
+) -> list[dict]:
+    DEFAULT_KEY = (None, None)
+
+    def group_by(result: dict, node: dict) -> dict:
+        upper = node.get("depthUpper")
+        lower = node.get("depthLower")
+        key = (upper, lower)
+
+        update_dict = {key: result.get(key, []) + [node]}
+        return result | update_dict
+
+    def depth_distance(key: tuple[Optional[float], Optional[float]]) -> float:
+        return sum(
+            abs(depth - target if isinstance(depth, (float, int)) else 9999)
+            for depth, target in zip(key, (target_depth_upper, target_depth_lower))
+        )
+
+    grouped = reduce(group_by, nodes, {})
+    nearest_key = min(grouped.keys(), key=depth_distance, default=DEFAULT_KEY)
+
+    return (
+        grouped.get(nearest_key, []) if depth_distance(nearest_key) <= 0 or not depth_strict else []
+    )
+
+
+def closest_end_date(
+    nodes: list[dict], target_date: str, mode: DatestrGapfillMode = DatestrGapfillMode.START
+) -> list[dict]:
+    DEFAULT_KEY = "no-dates"
+
+    def date_distance(date: str) -> float:
+        date_ = date if date != DEFAULT_KEY else OLDEST_DATE
+        return abs(diff_in_days(
+            _gapfill_datestr(date_, mode),
+            _gapfill_datestr(target_date, mode)
+        ))
+
+    grouped = group_nodes_by_last_date(nodes)
+    nearest_key = min(grouped.keys(), key=date_distance, default=DEFAULT_KEY)
+
+    return grouped.get(nearest_key, [])
+
+
+def pick_shallowest(nodes: list[dict], default=None):
+    return (
+        default if len(nodes) == 0 else _shallowest_node(nodes) if len(nodes) > 1 else nodes[0]
+    )
+
+
+def select_nodes_by(nodes: list[dict], filters: list[Callable[[list[dict]], list[dict]]]) -> Union[dict, list[dict]]:
+    """
+    Applies a series of filters to a list of blank nodes. Filters are applied in the order they are specifed in the
+    filters arg.
+    """
+    return reduce(
+        lambda result, func: func(result),
+        filters,
+        nodes
+    )
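Illustration (not part of the diff): the new closest_depth, closest_end_date, pick_shallowest and select_nodes_by helpers read like a composable filter pipeline for measurement-style blank nodes. A hedged usage sketch with hypothetical nodes:

from functools import partial

nodes = [
    {'term': {'@id': 'clayContent'}, 'depthUpper': 0, 'depthLower': 30, 'endDate': '2019'},
    {'term': {'@id': 'clayContent'}, 'depthUpper': 0, 'depthLower': 30, 'endDate': '2021'},
    {'term': {'@id': 'clayContent'}, 'depthUpper': 30, 'depthLower': 60, 'endDate': '2021'},
]

# keep nodes at exactly 0-30 cm (depth_strict defaults to True), then those whose
# end date is nearest to 2021, then reduce to a single shallowest node
node = select_nodes_by(nodes, [
    partial(closest_depth, target_depth_upper=0, target_depth_lower=30),
    partial(closest_end_date, target_date='2021'),
    pick_shallowest,
])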
@@ -1,11 +1,9 @@
 from enum import Enum
 from functools import reduce
 from typing import Any, Optional, Union
-
 from hestia_earth.schema import NodeType
-
 from hestia_earth.utils.blank_node import get_node_value
-from hestia_earth.utils.lookup import download_lookup,
+from hestia_earth.utils.lookup import download_lookup, get_table_value, extract_grouped_data
 from hestia_earth.utils.model import find_term_match
 from hestia_earth.utils.tools import safe_parse_float

@@ -84,10 +82,11 @@ def get_ecoClimateZone_lookup_value(eco_climate_zone: str, col_name: str, group_
     """
     try:
         lookup = download_lookup('ecoClimateZone.csv')
-        code = int(
-        data =
+        code = int(eco_climate_zone)
+        data = get_table_value(lookup, 'ecoClimateZone', code, col_name)
         return safe_parse_float(
-            data if group_name is None else extract_grouped_data(data, group_name),
+            data if group_name is None else extract_grouped_data(data, group_name),
+            default=None
         )
     except Exception:
         return 0

@@ -115,8 +114,8 @@ def get_ecoClimateZone_lookup_grouped_value(
     """
     try:
         lookup = download_lookup('ecoClimateZone.csv')
-        code = int(
-        data =
+        code = int(eco_climate_zone)
+        data = get_table_value(lookup, 'ecoClimateZone', code, col_name)
         grouped_data = reduce(
             lambda prev, curr: prev | {curr.split(':')[0]: safe_parse_float(curr.split(':')[1], default=None)},
             data.split(';'),
@@ -1,6 +1,6 @@
 from hestia_earth.schema import TermTermType
 from hestia_earth.utils.model import filter_list_term_type
-from hestia_earth.utils.lookup import
+from hestia_earth.utils.lookup import download_lookup, get_table_value
 from hestia_earth.utils.tools import safe_parse_float, list_sum

 from hestia_earth.models.log import debugValues, debugMissingLookup, log_as_table

@@ -17,8 +17,8 @@ def get_lookup_factor(practices: list, lookup_col: str):

 def _get_nh3_factor(lookup_name: str, term_id: str, input: dict, **log_args):
     input_term_id = input.get('term', {}).get('@id')
-    value = get_table_value(download_lookup(lookup_name), '
-    debugMissingLookup(lookup_name, '
+    value = get_table_value(download_lookup(lookup_name), 'term.id', term_id, input_term_id)
+    debugMissingLookup(lookup_name, 'term.id', term_id, input_term_id, value, **log_args)
     return safe_parse_float(value, default=None)
@@ -1,5 +1,5 @@
 from hestia_earth.utils.model import find_term_match
-from hestia_earth.utils.lookup import download_lookup, get_table_value,
+from hestia_earth.utils.lookup import download_lookup, get_table_value, lookup_term_ids
 from hestia_earth.utils.tools import non_empty_list, safe_parse_float

 from hestia_earth.models.log import logShouldRun

@@ -13,14 +13,14 @@ def get_feedipedia_properties():
     lookup = download_lookup('property.csv')
     term_ids = [
         term_id for term_id in lookup_term_ids(lookup)
-        if get_table_value(lookup, '
+        if get_table_value(lookup, 'term.id', term_id, 'feedipediaName')
     ]
     return term_ids


 def _should_rescale_by_dm(property_id: str):
     lookup = download_lookup('property.csv')
-    value = get_table_value(lookup, '
+    value = get_table_value(lookup, 'term.id', property_id, 'feedipediaConversionEnum')
     return 'dm' in value

@@ -40,10 +40,10 @@ def _dm_property(term_id: str, property_values: dict, dm_property_values: dict,


 def _map_properties(lookup, term_id: str, column_prefix: str):
-    value = get_table_value(lookup, '
-    sd = get_table_value(lookup, '
-    min = get_table_value(lookup, '
-    max = get_table_value(lookup, '
+    value = get_table_value(lookup, 'term.id', term_id, column_prefix)
+    sd = get_table_value(lookup, 'term.id', term_id, f"{column_prefix}-sd")
+    min = get_table_value(lookup, 'term.id', term_id, f"{column_prefix}-min")
+    max = get_table_value(lookup, 'term.id', term_id, f"{column_prefix}-max")
     return {'value': value, 'sd': sd, 'min': min, 'max': max}
@@ -226,6 +226,9 @@ def impact_aware_value(model: str, term_id: str, impact: dict, lookup: str, grou
     """
     # use cache version which is grouped
     blank_nodes = get_emissionsResourceUse(impact)
+    term_type = TermTermType.RESOURCEUSE.value if 'resourceUse' in lookup else TermTermType.EMISSION.value
+    blank_nodes = filter_list_term_type(blank_nodes, term_type)
+
     aware_id = get_site(impact).get('awareWaterBasinId')

     return None if aware_id is None else all_factor_value(
@@ -2,7 +2,7 @@ from typing import Union, Optional, List
 from hestia_earth.schema import SchemaType, TermTermType
 from hestia_earth.utils.model import find_term_match, linked_node, filter_list_term_type
 from hestia_earth.utils.tools import list_sum, non_empty_list, list_average, flatten
-from hestia_earth.utils.lookup import download_lookup, get_table_value
+from hestia_earth.utils.lookup import download_lookup, get_table_value
 from hestia_earth.utils.term import download_term

 from hestia_earth.models.log import logger

@@ -106,7 +106,7 @@ def match_lookup_value(input: dict, col_name: str, col_value):
     term_type = input.get('term', {}).get('termType')
     lookup = download_lookup(f"{term_type}.csv")
     term_id = input.get('term', {}).get('@id')
-    return get_table_value(lookup, '
+    return get_table_value(lookup, 'term.id', term_id, col_name) == col_value


 def get_feed_inputs(cycle: dict):
@@ -1,6 +1,6 @@
 from statistics import mean
 from hestia_earth.schema import TermTermType
-from hestia_earth.utils.lookup import download_lookup, get_table_value,
+from hestia_earth.utils.lookup import download_lookup, get_table_value, extract_grouped_data
 from hestia_earth.utils.model import find_primary_product, filter_list_term_type
 from hestia_earth.utils.tools import safe_parse_float

@@ -30,9 +30,9 @@ def get_default_digestibility(model: str, term_id: str, cycle: dict):

     for system in systems:
         system_id = system.get('term', {}).get('@id')
-        lookup_col =
-        value = get_table_value(lookup, '
-        debugMissingLookup(lookup_name, '
+        lookup_col = product_id
+        value = get_table_value(lookup, 'term.id', system_id, lookup_col)
+        debugMissingLookup(lookup_name, 'term.id', term_id, lookup_col, value, model=model, term=term_id)
         min = safe_parse_float(extract_grouped_data(value, 'min'), default=None)
         max = safe_parse_float(extract_grouped_data(value, 'max'), default=None)
         if min and max:
@@ -1,12 +1,6 @@
 from functools import lru_cache
 from typing import Optional, List
-from hestia_earth.utils.lookup import
-    download_lookup,
-    get_table_value,
-    column_name,
-    extract_grouped_data,
-    _get_single_table_value
-)
+from hestia_earth.utils.lookup import download_lookup, get_table_value, extract_grouped_data, is_missing_value
 from hestia_earth.utils.tools import list_sum, safe_parse_float

 from ..log import debugValues, log_as_table, debugMissingLookup

@@ -42,9 +36,9 @@ def _factor_value(
         )
         # value is either a number or matching between a model and a value (restrict value to specific model only)
         return safe_parse_float(
-            extract_grouped_data(coefficient, grouped_data_key),
+            extract_grouped_data(coefficient, grouped_data_key) if ':' in str(coefficient) else coefficient,
            default=None
-        )
+        )

     def get_value(blank_node: dict):
         node_term_id = blank_node.get('term', {}).get('@id')

@@ -111,15 +105,15 @@ def aware_factor_value(
     default_world_value: Optional[bool] = False
 ):
     lookup = download_lookup(lookup_name, False) # avoid saving in memory as there could be many different files used
-    lookup_col =
+    lookup_col = 'awareWaterBasinId'

     @lru_cache()
     def get_coefficient(node_term_id: str):
-        coefficient =
+        coefficient = get_table_value(lookup, lookup_col, int(aware_id), node_term_id)
         return safe_parse_float(
-            extract_grouped_data(coefficient, group_key),
+            extract_grouped_data(coefficient, group_key) if group_key else coefficient,
            default=None
-        )
+        )

     def get_value(blank_node: dict):
         node_term_id = blank_node.get('term', {}).get('@id')

@@ -167,7 +161,7 @@ def all_factor_value(
         term_id, region_id = missing_value
         debugMissingLookup(
             lookup_name=lookup_name,
-            row='
+            row='term.id',
             row_value=region_id or term_id,
             col=term_id if region_id else lookup_col,
             value=None,

@@ -195,18 +189,19 @@ def all_factor_value(

 def get_region_lookup(lookup_name: str, term_id: str):
     # for performance, try to load the region specific lookup if exists
-
-        download_lookup(lookup_name.replace('region-', f"{term_id}-")
+    lookup = (
+        download_lookup(lookup_name.replace('region-', f"{term_id}-"))
         if lookup_name and lookup_name.startswith('region-') else None
-    )
+    )
+    return lookup if lookup is not None and not lookup.empty else download_lookup(lookup_name)


 @lru_cache()
 def get_region_lookup_value(lookup_name: str, term_id: str, column: str, fallback_world: bool = False, **log_args):
     # for performance, try to load the region specific lookup if exists
     lookup = get_region_lookup(lookup_name, term_id)
-    value = get_table_value(lookup, '
-    if value
+    value = get_table_value(lookup, 'term.id', term_id, column)
+    if is_missing_value(value) and fallback_world:
         return get_region_lookup_value(lookup_name, 'region-world', column, **log_args)
-    debugMissingLookup(lookup_name, '
+    debugMissingLookup(lookup_name, 'term.id', term_id, column, value, **log_args)
     return value
@@ -1,6 +1,6 @@
 from functools import lru_cache
 from hestia_earth.schema import SchemaType, TermTermType
-from hestia_earth.utils.lookup import download_lookup, extract_grouped_data, get_table_value
+from hestia_earth.utils.lookup import download_lookup, extract_grouped_data, get_table_value
 from hestia_earth.utils.model import find_term_match, linked_node
 from hestia_earth.utils.tools import list_sum, safe_parse_float
 from hestia_earth.utils.term import download_term

@@ -104,11 +104,11 @@ def node_has_property(term_id: str):
 def node_property_lookup_value(model: str, term_id: str, term_type: str, prop_id: str, default=None, **log_args):
     try:
         lookup_name = f"{term_type}-property.csv"
-        lookup_value = get_table_value(download_lookup(lookup_name), '
+        lookup_value = get_table_value(download_lookup(lookup_name), 'term.id', term_id, prop_id)
         value = extract_grouped_data(lookup_value, 'Avg') if (
             isinstance(lookup_value, str) and 'Avg' in lookup_value
         ) else lookup_value
-        debugMissingLookup(lookup_name, '
+        debugMissingLookup(lookup_name, 'term.id', term_id, prop_id, value, model=model, **log_args)
         return safe_parse_float(value, default=None)
     except Exception:
         value = get_lookup_value({'@id': term_id, 'termType': term_type}, prop_id, skip_debug=True)
@@ -1,6 +1,6 @@
 from hestia_earth.schema import SchemaType, TermTermType, SiteSiteType
 from hestia_earth.utils.api import find_node, search
-from hestia_earth.utils.lookup import download_lookup, get_table_value
+from hestia_earth.utils.lookup import download_lookup, get_table_value

 from .constant import Units
 from ..log import debugMissingLookup

@@ -9,15 +9,15 @@ from ..log import debugMissingLookup
 LIMIT = 9999


-def get_lookup_value(lookup_term: dict, column: str, skip_debug: bool = False, **log_args):
+def get_lookup_value(lookup_term: dict, column: str, skip_debug: bool = False, default_value='', **log_args):
     table_name = f"{lookup_term.get('termType')}.csv" if lookup_term else None
     value = get_table_value(
-        download_lookup(table_name), '
+        download_lookup(table_name), 'term.id', lookup_term.get('@id'), column
     ) if table_name else None
     debugMissingLookup(
-        table_name, '
+        table_name, 'term.id', lookup_term.get('@id'), column, value, **log_args
     ) if lookup_term and not skip_debug else None
-    return value
+    return default_value if value is None else value


 def get_liquid_fuel_terms():
hestia_earth/models/version.py CHANGED

@@ -1 +1 @@
-VERSION = '0.75.
+VERSION = '0.75.2'
@@ -1,7 +1,7 @@
 from copy import deepcopy
 from functools import reduce
 from hestia_earth.schema import CompletenessJSONLD
-from hestia_earth.utils.lookup import download_lookup, get_table_value
+from hestia_earth.utils.lookup import download_lookup, get_table_value
 from hestia_earth.models.transformation.input.utils import replace_input_field
 from hestia_earth.models.utils.transformation import previous_transformation
 from hestia_earth.models.utils.product import find_by_product

@@ -22,7 +22,7 @@ def _include_practice(practice: dict):
     term_type = term.get('termType')
     term_id = term.get('@id')
     lookup = download_lookup(f"{term_type}.csv")
-    value = get_table_value(lookup, '
+    value = get_table_value(lookup, 'term.id', term_id, 'includeForTransformation')
     return False if value is None or value == '' or not value else True
@@ -1,5 +1,6 @@
 import pydash
-from hestia_earth.schema import EmissionMethodTier
+from hestia_earth.schema import SCHEMA_TYPES, EmissionMethodTier
+from hestia_earth.utils.tools import non_empty_list

 from hestia_earth.orchestrator.log import logger, logShouldMerge
 from hestia_earth.orchestrator.utils import update_node_version, _average

@@ -47,6 +48,35 @@ _MERGE_FROM_ARGS = {
 }


+def _merge_blank_node(source: dict, dest: dict):
+    # handle merging new value without min/max when source contained min/max
+    min_values = non_empty_list([
+        dest.get('value')[0] if isinstance(dest.get('value'), list) else None,
+        dest.get('min')[0] if isinstance(dest.get('min'), list) else None,
+        source.get('min')[0]
+    ]) if isinstance(source.get('min'), list) else None
+    min_value = min(min_values) if min_values else None
+
+    max_values = non_empty_list([
+        dest.get('value')[0] if isinstance(dest.get('value'), list) else None,
+        dest.get('max')[0] if isinstance(dest.get('max'), list) else None,
+        source.get('max')[0]
+    ]) if isinstance(source.get('max'), list) else None
+    max_value = max(max_values) if max_values else None
+
+    return (
+        {'min': [min_value]} if min_value else {}
+    ) | (
+        {'max': [max_value]} if max_value else {}
+    )
+
+
+def _merge_data(source: dict, dest: dict):
+    is_blank_node = dest.get('@type') in SCHEMA_TYPES
+    result = pydash.objects.merge({}, source, dest)
+    return result | (_merge_blank_node(source, dest) if is_blank_node else {})
+
+
 def merge(source: dict, dest: dict, version: str, model: dict = {}, merge_args: dict = {}, *args):
     merge_args = {
         key: func(source, dest, merge_args) for key, func in _MERGE_FROM_ARGS.items()

@@ -56,4 +86,4 @@ def merge(source: dict, dest: dict, version: str, model: dict = {}, merge_args:
     should_merge = all([v for _k, v in merge_args.items()])
     logShouldMerge(source, model.get('model'), term_id, should_merge, key=model.get('key'), value=term_id, **merge_args)

-    return update_node_version(version,
+    return update_node_version(version, _merge_data(source or {}, dest), source) if should_merge else source
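Illustration (not part of the diff, values hypothetical): assuming 'Emission' is listed in SCHEMA_TYPES, the new _merge_blank_node widens the min/max envelope when a recalculated value arrives without bounds, and _merge_data attaches that envelope to the merged node:

source = {'@type': 'Emission', 'value': [10], 'min': [8], 'max': [12]}
dest = {'@type': 'Emission', 'value': [11]}

# _merge_blank_node(source, dest) -> {'min': [8], 'max': [12]}
#   min = min(dest value, source min) = 8; max = max(dest value, source max) = 12
# _merge_data(source, dest) keeps the new value [11] plus the widened min/max.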
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hestia_earth_models
-Version: 0.75.
+Version: 0.75.2
 Summary: HESTIA's set of modules for filling gaps in the activity data using external datasets (e.g. populating soil properties with a geospatial dataset using provided coordinates) and internal lookups (e.g. populating machinery use from fuel use). Includes rules for when gaps should be filled versus not (e.g. never gap fill yield, gap fill crop residue if yield provided etc.).
 Home-page: https://gitlab.com/hestia-earth/hestia-engine-models
 Author: HESTIA Team

@@ -11,7 +11,7 @@ Requires-Python: >=3.12
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: hestia-earth-schema<36.0.0,>=35.0.1
-Requires-Dist: hestia-earth-utils>=0.16.
+Requires-Dist: hestia-earth-utils>=0.16.7
 Requires-Dist: CurrencyConverter==0.16.8
 Requires-Dist: haversine>=2.7.0
 Requires-Dist: pydash