hestia-earth-models 0.65.9__py3-none-any.whl → 0.65.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hestia_earth/models/cml2001Baseline/abioticResourceDepletionFossilFuels.py +3 -5
- hestia_earth/models/cml2001Baseline/abioticResourceDepletionMineralsAndMetals.py +1 -1
- hestia_earth/models/config/Cycle.json +9 -9
- hestia_earth/models/config/Site.json +18 -0
- hestia_earth/models/config/__init__.py +6 -0
- hestia_earth/models/cycle/materialAndSubstrate.py +1 -1
- hestia_earth/models/cycle/milkYield.py +9 -6
- hestia_earth/models/cycle/product/economicValueShare.py +8 -4
- hestia_earth/models/cycle/product/revenue.py +11 -7
- hestia_earth/models/faostat2018/product/price.py +1 -1
- hestia_earth/models/geospatialDatabase/utils.py +22 -17
- hestia_earth/models/hestia/landCover.py +15 -5
- hestia_earth/models/hestia/resourceUse_utils.py +24 -12
- hestia_earth/models/hestia/utils.py +1 -2
- hestia_earth/models/ipcc2019/animal/pastureGrass.py +1 -1
- hestia_earth/models/ipcc2019/pastureGrass.py +1 -1
- hestia_earth/models/mocking/search-results.json +914 -914
- hestia_earth/models/pooreNemecek2018/freshwaterWithdrawalsDuringCycle.py +0 -1
- hestia_earth/models/pooreNemecek2018/landOccupationDuringCycle.py +13 -10
- hestia_earth/models/site/defaultMethodClassification.py +35 -0
- hestia_earth/models/site/defaultMethodClassificationDescription.py +39 -0
- hestia_earth/models/site/management.py +30 -19
- hestia_earth/models/utils/blank_node.py +1 -1
- hestia_earth/models/utils/crop.py +1 -1
- hestia_earth/models/utils/cycle.py +3 -3
- hestia_earth/models/utils/impact_assessment.py +5 -3
- hestia_earth/models/version.py +1 -1
- {hestia_earth_models-0.65.9.dist-info → hestia_earth_models-0.65.11.dist-info}/METADATA +2 -2
- {hestia_earth_models-0.65.9.dist-info → hestia_earth_models-0.65.11.dist-info}/RECORD +45 -41
- tests/models/cml2001Baseline/test_abioticResourceDepletionFossilFuels.py +2 -16
- tests/models/cml2001Baseline/test_abioticResourceDepletionMineralsAndMetals.py +2 -16
- tests/models/edip2003/test_ozoneDepletionPotential.py +0 -13
- tests/models/hestia/test_landCover.py +2 -1
- tests/models/hestia/test_landTransformation20YearAverageDuringCycle.py +2 -1
- tests/models/ipcc2021/test_gwp100.py +0 -9
- tests/models/pooreNemecek2018/test_landOccupationDuringCycle.py +1 -3
- tests/models/poschEtAl2008/test_terrestrialAcidificationPotentialAccumulatedExceedance.py +0 -14
- tests/models/poschEtAl2008/test_terrestrialEutrophicationPotentialAccumulatedExceedance.py +0 -14
- tests/models/site/test_defaultMethodClassification.py +18 -0
- tests/models/site/test_defaultMethodClassificationDescription.py +18 -0
- tests/models/test_config.py +11 -2
- tests/models/utils/test_crop.py +14 -2
- {hestia_earth_models-0.65.9.dist-info → hestia_earth_models-0.65.11.dist-info}/LICENSE +0 -0
- {hestia_earth_models-0.65.9.dist-info → hestia_earth_models-0.65.11.dist-info}/WHEEL +0 -0
- {hestia_earth_models-0.65.9.dist-info → hestia_earth_models-0.65.11.dist-info}/top_level.txt +0 -0
hestia_earth/models/cml2001Baseline/abioticResourceDepletionFossilFuels.py

@@ -100,8 +100,8 @@ def download_all_non_renewable_terms(lookup_file_name: str) -> list:
 
 def _valid_resource_indicator(resource: dict) -> bool:
     return len(resource.get('inputs', [])) == 1 and \
-
-
+        isinstance(_node_value(resource), (int, float)) and \
+        _node_value(resource) > 0
 
 
 def _valid_input(input: dict) -> bool:
@@ -166,9 +166,7 @@ def _should_run(impact_assessment: dict) -> tuple[bool, list]:
                     all_inputs_have_valid_mj_value=all_inputs_have_valid_mj_value,
                     energy_resources_used=log_as_table(resource_uses_unpacked))
 
-    should_run =
-        has_valid_input_requirements,
-        all_inputs_have_valid_mj_value])
+    should_run = all([has_resource_use_entries, has_valid_input_requirements, all_inputs_have_valid_mj_value])
 
     logShouldRun(impact_assessment, MODEL, TERM_ID, should_run)
     return should_run, valid_energy_resources_in_mj
hestia_earth/models/cml2001Baseline/abioticResourceDepletionMineralsAndMetals.py

@@ -125,7 +125,7 @@ def _should_run(impact_assessment: dict) -> tuple[bool, list]:
                     resource_uses=log_as_table(resource_uses_unpacked)
                     )
 
-    should_run = has_valid_input_requirements
+    should_run = all([has_valid_input_requirements, has_resource_use_entries])
 
     logShouldRun(impact_assessment, MODEL, TERM_ID, should_run)
     return should_run, valid_resources_with_cf
hestia_earth/models/config/Cycle.json

@@ -609,15 +609,7 @@
       "stage": 2
     },
     {
-      "key": "
-      "model": "cycle",
-      "value": "energyContentLowerHeatingValue",
-      "runStrategy": "always",
-      "mergeStrategy": "list",
-      "stage": 2
-    },
-    {
-      "key": "products",
+      "key": "animals",
       "model": "cycle",
       "value": "milkYield",
       "runStrategy": "always",
@@ -625,6 +617,14 @@
       "stage": 2
     }
   ],
+  {
+    "key": "inputs",
+    "model": "cycle",
+    "value": "energyContentLowerHeatingValue",
+    "runStrategy": "always",
+    "mergeStrategy": "list",
+    "stage": 2
+  },
   {
     "key": "products",
     "model": "cycle",
hestia_earth/models/config/Site.json

@@ -432,6 +432,24 @@
       "stage": 2
     }
   ],
+  [
+    {
+      "key": "defaultMethodClassification",
+      "model": "site",
+      "value": "defaultMethodClassification",
+      "runStrategy": "add_key_if_missing",
+      "mergeStrategy": "default",
+      "stage": 2
+    },
+    {
+      "key": "defaultMethodClassificationDescription",
+      "model": "site",
+      "value": "defaultMethodClassificationDescription",
+      "runStrategy": "add_key_if_missing",
+      "mergeStrategy": "default",
+      "stage": 2
+    }
+  ],
   [
     {
       "key": "measurements",
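Both new Site.json entries use the `add_key_if_missing` run strategy, so the two site models are only meant to fill their key when it is absent. A rough sketch of that gating logic, stated as an assumption based on the strategy name rather than the engine's actual code:

    def should_add_key(site: dict, key: str) -> bool:
        # assumption: "add_key_if_missing" means the model runs only
        # when the Site node does not already carry the key
        return key not in site

    print(should_add_key({"siteType": "cropland"}, "defaultMethodClassification"))                     # True
    print(should_add_key({"defaultMethodClassification": "modelled"}, "defaultMethodClassification"))  # False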
hestia_earth/models/config/__init__.py

@@ -69,3 +69,9 @@ def load_run_config(node_type: str, stage: int):
 
 def load_trigger_config(node_type: str, stage: int):
     return _load_stage_config('trigger-calculations', node_type, stage)
+
+
+def get_max_stage(node_type: str):
+    config = _load_config('run-calculations').get(node_type, {})
+    stages = list(map(lambda k: int(k.replace('stage-', '')), config.keys()))
+    return max(stages)
hestia_earth/models/cycle/materialAndSubstrate.py

@@ -106,7 +106,7 @@ def _has_depreciated_term(term: dict):
 def _should_run_input(cycle: dict, input_node: dict) -> bool:
     term = input_node.get('term', {})
     term_id = term.get('@id')
-    has_lifespan = input_node.get('lifespan'
+    has_lifespan = input_node.get('lifespan') or 0 > 0
     has_valid_value = _get_value(input_node, 'value') > 0
     has_depreciated_term = _has_depreciated_term(term)
 
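Worth noting when reading the new `has_lifespan` line: Python comparisons bind tighter than `or`, so the expression groups as `input_node.get('lifespan') or (0 > 0)` and yields the raw lifespan value (or `False`) rather than a strict boolean. A quick illustration:

    lifespan = None                # stand-in for input_node.get('lifespan')
    print(lifespan or 0 > 0)       # False
    lifespan = 5
    print(lifespan or 0 > 0)       # 5 (truthy, but not the boolean True)
    print((lifespan or 0) > 0)     # True -- the fully parenthesised comparison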
hestia_earth/models/cycle/milkYield.py

@@ -39,12 +39,15 @@ REQUIREMENTS = {
     }
 }
 RETURNS = {
-    "
-    "
-
-
-
-
+    "Animal": [{
+        "practices": [{
+            "@type": "Practice",
+            "value": "",
+            "min": "",
+            "max": "",
+            "sd": "",
+            "statsDefinition": "modelled"
+        }]
     }]
 }
 LOOKUPS = {
hestia_earth/models/cycle/product/economicValueShare.py

@@ -15,7 +15,7 @@ corresponding value, the `economicValueShare` will be proportionally distributed
 from hestia_earth.utils.model import find_term_match
 from hestia_earth.utils.tools import list_sum
 
-from hestia_earth.models.log import logRequirements, logShouldRun
+from hestia_earth.models.log import logRequirements, logShouldRun, log_as_table
 from hestia_earth.models.utils.blank_node import get_lookup_value
 from hestia_earth.models.utils.cycle import unique_currencies
 from .utils import lookup_share
@@ -55,7 +55,7 @@ MIN_COMPLETE_VALUE = 80 # when the products are complete lower the min threshol
 
 
 def _product(product: dict, value: float):
-    return
+    return product | {MODEL_KEY: value}
 
 
 def _is_complete(cycle: dict): return cycle.get('completeness', {}).get('product', False)
@@ -69,7 +69,7 @@ def _total_evs(products: list): return sum([p.get(MODEL_KEY, 0) for p in product
 
 def _product_with_value(product: dict):
     value = product.get(MODEL_KEY, lookup_share(MODEL_KEY, product))
-    return
+    return product | {MODEL_KEY: value} if value is not None else product
 
 
 def _rescale_value(products: list, total: float):
@@ -87,7 +87,11 @@ def _should_run_by_default(cycle: dict, products: list):
     for p in products:
         term_id = p.get('term', {}).get('@id')
         logRequirements(cycle, model=MODEL, term=term_id, key=MODEL_KEY, by=run_by,
-                        all_with_economicValueShare=all_with_economicValueShare
+                        all_with_economicValueShare=all_with_economicValueShare,
+                        products_with_share=log_as_table([{
+                            'id': p.get('term', {}).get('@id'),
+                            MODEL_KEY: p.get(MODEL_KEY)
+                        } for p in products]))
         logShouldRun(cycle, MODEL, term_id, should_run, key=MODEL_KEY, by=run_by)
 
     return should_run
hestia_earth/models/cycle/product/revenue.py

@@ -48,18 +48,22 @@ def _run(cycle: dict):
 def _should_run(cycle: dict):
     def should_run_product(product: dict):
         term_id = product.get('term', {}).get('@id')
-
-
-
-        is_yield_0 =
-
+
+        value = list_sum(product.get('value') or [], default=None)
+        has_yield = bool(value)
+        is_yield_0 = value == 0
+
+        price = product.get('price') or -1
+        has_price = price > 0
+        is_price_0 = price == 0
 
         logRequirements(cycle, model=MODEL, term=term_id, key=MODEL_KEY,
-
+                        has_yield=has_yield,
+                        has_price=has_price,
                         is_yield_0=is_yield_0,
                         is_price_0=is_price_0)
 
-        should_run = any([
+        should_run = any([has_yield and has_price, is_yield_0, is_price_0])
         logShouldRun(cycle, MODEL, term_id, should_run, key=MODEL_KEY)
         return should_run
     return should_run_product
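The revised `should_run_product` gate reads as: run when a product has both a non-zero yield and a positive price, or when either value is explicitly zero (so revenue can still be gap-filled as 0). A standalone sketch of the same predicate with purely illustrative products:

    def should_run_product(product: dict) -> bool:
        values = product.get("value") or []
        value = sum(values) if values else None     # mirrors list_sum(..., default=None)
        has_yield, is_yield_0 = bool(value), value == 0

        price = product.get("price") or -1
        has_price, is_price_0 = price > 0, price == 0

        return any([has_yield and has_price, is_yield_0, is_price_0])

    print(should_run_product({"value": [1000], "price": 0.25}))  # True: yield and price present
    print(should_run_product({"value": [0]}))                    # True: yield is explicitly 0
    print(should_run_product({"value": [1000]}))                 # False: no price to multiply by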
hestia_earth/models/faostat2018/product/price.py

@@ -129,7 +129,7 @@ def _run_by_country(cycle: dict, product: dict, country_id: str, year: int = Non
     term_type = product_term.get('termType')
     term_units = product_term.get('units')
 
-    has_yield = len(product.get('value'
+    has_yield = len(product.get('value') or []) > 0
     not_already_set = MODEL_KEY not in product.keys()
 
     # get the grouping used in region lookup
hestia_earth/models/geospatialDatabase/utils.py

@@ -29,6 +29,11 @@ def to_celcius(kelvin_value: int): return kelvin_value - KELVIN_0 if kelvin_valu
 def use_geopandas(): return os.getenv('HEE_USE_GEOPANDAS', 'false') == 'true'
 
 
+def _has_cache(site: dict):
+    cache = cached_value(site, key=CACHE_VALUE, default=None)
+    return bool(cache)
+
+
 def _cached_value(site: dict, key: str):
     return cached_value(site, key=CACHE_VALUE, default={}).get(key)
 
@@ -166,23 +171,23 @@ def download(term: str, site: dict, data: dict, only_coordinates=False) -> dict:
         Data returned from the API.
     """
     # check if we have cached the result already, else run and parse result
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if _has_cache(site):
+        # even if the cached value is null, we do not want to run the query again
+        # TODO: we might want to store the date it was cached, and run again if more than 30 days
+        return _get_cached_data(term, site, data)
+
+    location_data = geospatial_data(site, only_coordinates=only_coordinates)
+    query = {
+        'ee_type': data.get('ee_type'),
+        **location_data,
+        'collections': [
+            {
+                **data,
+                'collection': _collection_name(data.get('collection'))
+            }
+        ]
+    }
+    value = _parse_run_query(term, query)
     if value is None:
         debugValues(site, model=MODEL, term=term, value_from_earth_engine=None)
     return value
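The rewritten `download` is a cache-first guard: any cached entry for the site, even a null result, is returned without re-querying Earth Engine. A minimal generic sketch of that pattern (names here are illustrative, not the module's real API):

    def cached_download(site: dict, run_query):
        cache = site.get("_cache")   # stand-in for cached_value(site, key=CACHE_VALUE)
        if cache is not None:
            # a cached None is still a hit: do not run the query again
            return cache.get("result")
        return run_query(site)

    print(cached_download({"_cache": {"result": None}}, run_query=lambda s: "queried"))  # None (cache hit)
    print(cached_download({}, run_query=lambda s: "queried"))                            # queried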
hestia_earth/models/hestia/landCover.py

@@ -82,7 +82,8 @@ LOOKUPS = {
         "Permanent meadows and pastures"
     ],
     "crop": ["cropGroupingFaostatArea", "IPCC_LAND_USE_CATEGORY"],
-    "landCover": ["cropGroupingFaostatProduction", "FAOSTAT_LAND_AREA_CATEGORY"]
+    "landCover": ["cropGroupingFaostatProduction", "FAOSTAT_LAND_AREA_CATEGORY"],
+    "property": "CALCULATE_TOTAL_LAND_COVER_SHARE_SEPARATELY"
 }
 MODEL_KEY = 'landCover'
 
@@ -120,6 +121,13 @@ def site_area_sum_to_100(dict_of_percentages: dict):
             math.isclose(sum(dict_of_percentages.values()), 0.0, rel_tol=0.01))
 
 
+def _should_group_landCover(term: dict):
+    return any(
+        bool(get_lookup_value(lookup_term=prop.get("term", {}), column="CALCULATE_TOTAL_LAND_COVER_SHARE_SEPARATELY"))
+        for prop in term.get("properties", [])
+    )
+
+
 def get_changes(country_id: str, end_year: int) -> dict:
     """
     For each entry in ALL_LAND_USE_TERMS, creates a key: value in output dictionary, also TOTAL
@@ -714,8 +722,7 @@ def _no_prior_land_cover_data(nodes: list, end_date: str) -> bool:
     return len(previous_nodes) == 0
 
 
-def _should_run(site: dict) -> tuple[bool, dict]:
-    management_nodes = filter_list_term_type(site.get("management", []), TermTermType.LANDCOVER)
+def _should_run(site: dict, management_nodes: list) -> tuple[bool, dict]:
     summarised_nodes = _collect_land_use_types(management_nodes)
     allowed_land_use_types = [ANNUAL_CROPLAND, PERMANENT_CROPLAND, PERMANENT_PASTURE]
     relevant_nodes = sorted(
@@ -751,8 +758,11 @@ def _should_run(site: dict) -> tuple[bool, dict]:
 
 
 def run(site: dict) -> list:
-
-
+    management_nodes = [
+        node for node in filter_list_term_type(site.get("management", []), TermTermType.LANDCOVER)
+        if not _should_group_landCover(node)
+    ]
+    should_run, site_area = _should_run(site=site, management_nodes=management_nodes)
     return _run_make_management_nodes(
         existing_nodes=management_nodes,
         percentage_transformed_from=site_area,
hestia_earth/models/hestia/resourceUse_utils.py

@@ -8,7 +8,7 @@ from dateutil.relativedelta import relativedelta
 from hestia_earth.schema import TermTermType
 from hestia_earth.utils.tools import to_precision
 
-from hestia_earth.models.log import logRequirements, logShouldRun
+from hestia_earth.models.log import logRequirements, logShouldRun, debugValues
 from hestia_earth.models.utils.blank_node import _gapfill_datestr, DatestrGapfillMode, DatestrFormat, _str_dates_match
 from hestia_earth.models.utils.impact_assessment import get_site
 from hestia_earth.models.utils.indicator import _new_indicator
@@ -78,11 +78,15 @@ def should_run(
         node for node in impact_assessment.get("emissionsResourceUse", [])
         if node.get("term", {}).get("@id", "") == _RESOURCE_USE_TERM_ID and node.get("value", -1) >= 0
     ]
-
     filtered_management_nodes = [
         node for node in site.get("management", [])
         if node.get("value", -1) >= 0 and node.get("term", {}).get("termType", "") == TermTermType.LANDCOVER.value
     ]
+    land_occupation_during_cycle_found = any(
+        node.get("term", {}).get("@id") in
+        {node.get("landCover", {}).get("@id") for node in relevant_emission_resource_use}
+        for node in filtered_management_nodes
+    )
     current_node_index = next(
         (i for i, node in enumerate(filtered_management_nodes)
          if _str_dates_match(node.get("endDate", ""), impact_assessment.get("endDate", ""))),
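`land_occupation_during_cycle_found` simply asks whether any land-cover management node's term id appears among the `landCover` ids referenced by the land-occupation indicators. A small self-contained sketch of that check (the ids are invented for the example):

    resource_use = [{"landCover": {"@id": "annualCropland"}, "value": 1200}]
    management = [{"term": {"@id": "annualCropland"}, "value": 100}]

    land_cover_ids = {node.get("landCover", {}).get("@id") for node in resource_use}
    found = any(node.get("term", {}).get("@id") in land_cover_ids for node in management)
    print(found)  # True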
@@ -96,19 +100,18 @@ def should_run(
         historic_date_offset=historic_date_offset
     )
 
-    logRequirements(
-
-
-
-        site=site
-    )
+    logRequirements(impact_assessment, model=MODEL, term=term_id,
+                    closest_date=closest_date_str,
+                    land_occupation_during_cycle_found=land_occupation_during_cycle_found,
+                    land_cover_term_id=(current_node or {}).get('term', {}).get('@id'))
 
     should_run_result = all([
-        relevant_emission_resource_use
+        relevant_emission_resource_use,
+        land_occupation_during_cycle_found,
         current_node,
         close_date_found
     ])
-    logShouldRun(
+    logShouldRun(impact_assessment, MODEL, term=term_id, should_run=should_run_result)
 
     return should_run_result, current_node, closest_date_str
 
@@ -126,6 +129,7 @@ def _get_land_occupation_for_land_use_type(impact_assessment: dict, ipcc_land_us
 
 def _calculate_indicator_value(
     impact_assessment: dict,
+    term_id: str,
     management_nodes: list,
     ipcc_land_use_category: str,
     previous_land_cover_id: str,
@@ -143,6 +147,13 @@ def _calculate_indicator_value(
         node.get("value", 0) for node in management_nodes
         if node.get("term", {}).get("@id", "") == previous_land_cover_id
     )
+
+    debugValues(impact_assessment, model=MODEL, term=term_id,
+                ipcc_land_use_category=ipcc_land_use_category,
+                land_occupation_for_cycle=land_occupation_for_cycle,
+                historical_land_use=historical_land_use,
+                historic_date_offset=historic_date_offset)
+
     return ((land_occupation_for_cycle * historical_land_use) / 100) / historic_date_offset
 
 
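The returned indicator value scales the cycle's land occupation by the historical share (a percentage) and spreads it over the offset window. With purely illustrative numbers:

    land_occupation_for_cycle = 5000.0   # land occupation during the cycle (illustrative)
    historical_land_use = 40.0           # % of area under the previous land cover (illustrative)
    historic_date_offset = 20            # years in the reference window (illustrative)
    print(((land_occupation_for_cycle * historical_land_use) / 100) / historic_date_offset)  # 100.0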
@@ -164,6 +175,7 @@ def _run_calculate_transformation(
         previous_land_cover_id=previous_land_cover_id,
         value=_calculate_indicator_value(
             impact_assessment=impact_assessment,
+            term_id=term_id,
             management_nodes=[
                 node for node in site.get("management", [])
                 if _str_dates_match(node.get("endDate", ""), closest_date_str)
@@ -191,10 +203,10 @@ def run_resource_use(
         historic_date_offset=historic_date_offset
     )
     return _run_calculate_transformation(
+        impact_assessment=impact_assessment,
+        site=site,
         term_id=term_id,
         current_node=current_node,
         closest_date_str=closest_date_str,
-        site=site,
-        impact_assessment=impact_assessment,
         historic_date_offset=historic_date_offset
     ) if _should_run else []
hestia_earth/models/hestia/utils.py

@@ -43,6 +43,5 @@ def crop_ipcc_land_use_category(
     return get_lookup_value(
         lookup_term={"@id": crop_term_id, "type": "Term", "termType": lookup_term_type},
         column='IPCC_LAND_USE_CATEGORY',
-        model=MODEL
-        term={"@id": crop_term_id, "type": "Term", "termType": lookup_term_type}
+        model=MODEL
     )
hestia_earth/models/ipcc2019/animal/pastureGrass.py

@@ -233,7 +233,7 @@ def _run_practice(animal: dict, values: dict, meanDE: float, meanECHHV: float, R
         'weightAtOneYear',
         'weightAtSlaughter'
     ])
-    has_positive_feed_values = all([NEm_feed
+    has_positive_feed_values = all([NEm_feed >= 0, NEg_feed >= 0])
 
     logRequirements(animal, model=MODEL, term=input_term_id, model_key=MODEL_KEY,
                     feed_logs=log_as_table(log_feed),
hestia_earth/models/ipcc2019/pastureGrass.py

@@ -212,7 +212,7 @@ def _run_practice(cycle: dict, meanDE: float, meanECHHV: float, REM: float, REG:
         'weightAtOneYear',
         'weightAtSlaughter'
     ])
-    has_positive_feed_values = all([NEm_feed
+    has_positive_feed_values = all([NEm_feed >= 0, NEg_feed >= 0])
 
     logRequirements(cycle, model=MODEL, term=input_term_id, model_key=MODEL_KEY,
                     feed_logs=log_as_table(log_feed),