hestia-earth-models 0.61.7__py3-none-any.whl → 0.62.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of hestia-earth-models might be problematic.

Files changed (51)
  1. hestia_earth/models/cycle/completeness/electricityFuel.py +60 -0
  2. hestia_earth/models/cycle/product/economicValueShare.py +47 -31
  3. hestia_earth/models/emepEea2019/nh3ToAirInorganicFertiliser.py +44 -59
  4. hestia_earth/models/geospatialDatabase/histosol.py +4 -0
  5. hestia_earth/models/ipcc2006/co2ToAirOrganicSoilCultivation.py +4 -2
  6. hestia_earth/models/ipcc2006/n2OToAirOrganicSoilCultivationDirect.py +1 -1
  7. hestia_earth/models/ipcc2019/aboveGroundCropResidueTotal.py +1 -1
  8. hestia_earth/models/ipcc2019/animal/pastureGrass.py +30 -24
  9. hestia_earth/models/ipcc2019/belowGroundCropResidue.py +1 -1
  10. hestia_earth/models/ipcc2019/ch4ToAirExcreta.py +1 -1
  11. hestia_earth/models/ipcc2019/co2ToAirSoilOrganicCarbonStockChangeManagementChange.py +511 -458
  12. hestia_earth/models/ipcc2019/co2ToAirUreaHydrolysis.py +5 -1
  13. hestia_earth/models/ipcc2019/organicCarbonPerHa.py +116 -3882
  14. hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_1_utils.py +2060 -0
  15. hestia_earth/models/ipcc2019/organicCarbonPerHa_tier_2_utils.py +1630 -0
  16. hestia_earth/models/ipcc2019/organicCarbonPerHa_utils.py +324 -0
  17. hestia_earth/models/ipcc2019/pastureGrass.py +37 -19
  18. hestia_earth/models/ipcc2019/pastureGrass_utils.py +4 -21
  19. hestia_earth/models/mocking/search-results.json +293 -289
  20. hestia_earth/models/site/organicCarbonPerHa.py +58 -44
  21. hestia_earth/models/site/soilMeasurement.py +18 -13
  22. hestia_earth/models/utils/__init__.py +28 -0
  23. hestia_earth/models/utils/array_builders.py +578 -0
  24. hestia_earth/models/utils/blank_node.py +55 -39
  25. hestia_earth/models/utils/descriptive_stats.py +285 -0
  26. hestia_earth/models/utils/emission.py +73 -2
  27. hestia_earth/models/utils/inorganicFertiliser.py +2 -2
  28. hestia_earth/models/utils/measurement.py +118 -4
  29. hestia_earth/models/version.py +1 -1
  30. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.62.0.dist-info}/METADATA +2 -2
  31. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.62.0.dist-info}/RECORD +51 -39
  32. tests/models/cycle/completeness/test_electricityFuel.py +21 -0
  33. tests/models/cycle/product/test_economicValueShare.py +8 -0
  34. tests/models/emepEea2019/test_nh3ToAirInorganicFertiliser.py +2 -2
  35. tests/models/ipcc2019/animal/test_pastureGrass.py +2 -2
  36. tests/models/ipcc2019/test_co2ToAirSoilOrganicCarbonStockChangeManagementChange.py +55 -165
  37. tests/models/ipcc2019/test_organicCarbonPerHa.py +219 -460
  38. tests/models/ipcc2019/test_organicCarbonPerHa_tier_1_utils.py +471 -0
  39. tests/models/ipcc2019/test_organicCarbonPerHa_tier_2_utils.py +208 -0
  40. tests/models/ipcc2019/test_organicCarbonPerHa_utils.py +75 -0
  41. tests/models/ipcc2019/test_pastureGrass.py +0 -16
  42. tests/models/site/test_organicCarbonPerHa.py +3 -12
  43. tests/models/site/test_soilMeasurement.py +3 -18
  44. tests/models/utils/test_array_builders.py +253 -0
  45. tests/models/utils/test_blank_node.py +154 -15
  46. tests/models/utils/test_descriptive_stats.py +134 -0
  47. tests/models/utils/test_emission.py +51 -1
  48. tests/models/utils/test_measurement.py +54 -2
  49. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.62.0.dist-info}/LICENSE +0 -0
  50. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.62.0.dist-info}/WHEEL +0 -0
  51. {hestia_earth_models-0.61.7.dist-info → hestia_earth_models-0.62.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,75 @@
+ from pytest import mark
+
+ from hestia_earth.models.ipcc2019.organicCarbonPerHa_utils import (
+     format_bool, format_bool_list, format_enum, format_number, format_number_list, IpccSoilCategory
+ )
+
+
+ @mark.parametrize(
+     "value, expected",
+     [
+         (True, "True"),
+         (False, "False"),
+         ([], "False"),
+         ("str", "True"),
+         (None, "False")
+     ],
+     ids=["True", "False", "list", "str", "None"]
+ )
+ def test_format_bool(value, expected):
+     assert format_bool(value) == expected
+
+
+ @mark.parametrize(
+     "value, expected",
+     [
+         ([True, True, False], "True True False"),
+         ([], "None"),
+         (["Yes", "No", ""], "True True False"),
+         (None, "None")
+     ],
+     ids=["list", "empty list", "list[str]", "None"]
+ )
+ def test_format_bool_list(value, expected):
+     assert format_bool_list(value) == expected
+
+
+ @mark.parametrize(
+     "value, expected",
+     [
+         (IpccSoilCategory.WETLAND_SOILS, IpccSoilCategory.WETLAND_SOILS.value),
+         ("str", "None"),
+         (None, "None")
+     ],
+     ids=["Enum", "str", "None"]
+ )
+ def test_format_enum(value, expected):
+     assert format_enum(value) == expected
+
+
+ @mark.parametrize(
+     "value, expected",
+     [
+         (3.141592653, "3.1"),
+         (0, "0.0"),
+         ("20", "None"),
+         (None, "None")
+     ],
+     ids=["float", "zero", "str", "None"]
+ )
+ def test_format_number(value, expected):
+     assert format_number(value) == expected
+
+
+ @mark.parametrize(
+     "value, expected",
+     [
+         ([3.14, 31.4, 314], "3.1 31.4 314.0"),
+         ([], "None"),
+         (["Yes", "No", ""], "None None None"),
+         (None, "None")
+     ],
+     ids=["list", "empty list", "list[str]", "None"]
+ )
+ def test_format_number_list(value, expected):
+     assert format_number_list(value) == expected
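The new tests pin down the formatting helpers' behaviour: truthiness is rendered as "True"/"False", numbers are rendered to one decimal place, enums are rendered via their value, and anything unusable becomes "None". The sketch below is an illustration consistent with these fixtures only; the signatures and bodies are assumptions, not the package source.

from enum import Enum
from typing import Any, Optional, Union


def format_bool(value: Optional[Any]) -> str:
    # Truthiness of the argument decides the string ("str" -> "True", [] -> "False").
    return str(bool(value))


def format_bool_list(values: Optional[list]) -> str:
    # Empty or missing lists collapse to "None"; otherwise one token per element.
    return " ".join(format_bool(v) for v in values) if values else "None"


def format_enum(value: Optional[Enum]) -> str:
    # Only Enum members are rendered; any other input becomes "None".
    return value.value if isinstance(value, Enum) else "None"


def format_number(value: Optional[Union[int, float]]) -> str:
    # Numbers are rendered to one decimal place; strings and None become "None".
    is_number = isinstance(value, (int, float)) and not isinstance(value, bool)
    return f"{value:.1f}" if is_number else "None"


def format_number_list(values: Optional[list]) -> str:
    return " ".join(format_number(v) for v in values) if values else "None"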
@@ -50,22 +50,6 @@ def test_run(*args):
      assert value == expected


- @patch(f"{class_path_utils}.download_hestia", side_effect=fake_download_hestia)
- @patch("hestia_earth.models.utils.property.download_hestia", side_effect=fake_download_hestia)
- @patch(f"{class_path}.get_wool_terms", return_value=WOOL_TERMS)
- @patch(f"hestia_earth.models.{MODEL}.utils.get_milkYield_terms", return_value=MILK_YIELD_TERMS)
- @patch(f"{class_path}._new_input", side_effect=fake_new_input)
- def test_run_with_feed(*args):
-     with open(f"{fixtures_folder}/with-feed/cycle.jsonld", encoding='utf-8') as f:
-         cycle = json.load(f)
-
-     with open(f"{fixtures_folder}/with-feed/result.jsonld", encoding='utf-8') as f:
-         expected = json.load(f)
-
-     value = run(cycle)
-     assert value == expected
-
-
  @patch(f"{class_path_utils}.download_hestia", side_effect=fake_download_hestia)
  @patch("hestia_earth.models.utils.property.download_hestia", side_effect=fake_download_hestia)
  @patch(f"{class_path}.get_wool_terms", return_value=WOOL_TERMS)
@@ -5,7 +5,7 @@ from unittest.mock import patch
  from tests.utils import fixtures_path, fake_new_measurement

  from hestia_earth.models.site.organicCarbonPerHa import (
-     MODEL, TERM_ID, run, _cdf, _c_to_depth, _get_most_relevant_soc_node, _get_last_date, _should_run_calculation_group
+     MODEL, TERM_ID, run, _cdf, _c_to_depth, _get_most_relevant_soc_node, _get_last_date
  )

  class_path = f"hestia_earth.models.{MODEL}.{TERM_ID}"
@@ -14,6 +14,8 @@ fixtures_folder = f"{fixtures_path}/{MODEL}/{TERM_ID}"
  SUBFOLDERS = [
      "calculate-single",
      "calculate-multiple",
+     "calculate-multiple-with-existing-soc-measurements",  # Closes #823
+     "calculate-multiple-with-multiple-methods",  # Closes #823
      "rescale-single",
      "rescale-multiple",
      "calculate-and-rescale"
@@ -82,17 +84,6 @@ def test_get_most_relevant_soc_node(nodes, expected_id):
      assert _get_most_relevant_soc_node(nodes).get("@id") == expected_id


- @patch(f"{class_path}.find_term_match")
- def test_should_run_measurements(mock_find):
-     # no measurement => no run
-     mock_find.return_value = {}
-     assert _should_run_calculation_group([]) is False
-
-     # with measurement => run
-     mock_find.return_value = {'value': [10], 'depthUpper': 0, 'depthLower': 10}
-     assert _should_run_calculation_group([]) is True
-
-
  @mark.parametrize("subfolder", SUBFOLDERS)
  @patch(f"{class_path}.get_source", return_value={})
  @patch(f"{class_path}._new_measurement", side_effect=fake_new_measurement)
@@ -135,23 +135,11 @@ def test_harmonise_measurements(measurements_list, returns_dict, expected_value)
          )
      ]
  )
- @patch(f"{class_path}.get_lookup_value")
- def test_should_run(mock_get_lookup, test_name, site, expected_should_run):
-     mock_get_lookup.return_value = True
-     model_key = "clayContent"
-     should_run, *args = _should_run(site=site, model_key=model_key)
+ def test_should_run(test_name, site, expected_should_run):
+     should_run, *args = _should_run(site)
      assert should_run == expected_should_run, test_name


- def lookup_side_effect(*args, **kwargs):
-     _ = kwargs
-     if args[0]["@id"] == "soilPh" and args[1] == "depthSensitive":
-         return False
-     elif args[0]["@id"] in {"baseSaturation", "soilDepth", "rainfallHourly"}:
-         return False
-     return True
-
-
  @pytest.mark.parametrize(
      "test_name",
      [
@@ -165,10 +153,7 @@ def lookup_side_effect(*args, **kwargs):
      ]
  )
  @patch(f"{class_path}._new_measurement", side_effect=fake_new_measurement)
- @patch(f"{class_path}.get_lookup_value")
- def test_run(mock_lookup, mock_new_measurement, test_name):
-     mock_lookup.side_effect = lookup_side_effect
-
+ def test_run(mock_new_measurement, test_name):
      with open(f"{fixtures_folder}/{test_name}/site.jsonld", encoding='utf-8') as f:
          site = json.load(f)

@@ -0,0 +1,253 @@
+ from numpy import array
+ from numpy.testing import assert_array_equal
+ from numpy.typing import NDArray
+ from pytest import mark
+
+ from hestia_earth.models.utils.array_builders import (
+     avg_run_in_columnwise, avg_run_in_rowwise, discrete_uniform_1d, discrete_uniform_2d, gen_seed, grouped_avg,
+     normal_1d, normal_2d, plus_minus_uncertainty_to_normal_1d, plus_minus_uncertainty_to_normal_2d,
+     repeat_1d_array_as_columns, repeat_array_as_columns, repeat_array_as_rows, repeat_single, triangular_1d,
+     triangular_2d, truncated_normal_1d, truncated_normal_2d
+ )
+
+ SEED = 0
+ SHAPE = (1000, 1000)
+
+
+ def assert_rows_identical(arr: NDArray):
+     """
+     Covert array to a set to remove repeated rows and check that number remaining rows is 1.
+     """
+     assert len(set(map(tuple, arr))) == 1
+
+
+ def assert_rows_unique(arr: NDArray):
+     """
+     Covert array to a set to remove repeated rows and check that number remaining rows is the same as the number of
+     original rows.
+     """
+     assert len(set(map(tuple, arr))) == len(arr)
+
+
+ def assert_elements_between(arr: NDArray, min: float, max: float):
+     assert ((min <= arr) & (arr <= max)).all()
+
+
+ PARAMS_REPEAT_SINGLE = [
+     (3.14159, None, 3.14159),
+     (3.14159, bool, True),
+     (True, None, True),
+     (True, float, 1)
+ ]
+
+ IDS_REPEAT_SINGLE = [
+     f"{type(value).__name__}{f' -> {dtype.__name__}' if dtype else ''}" for value, dtype, _ in PARAMS_REPEAT_SINGLE
+ ]
+
+
+ @mark.parametrize(
+     "value, dtype, expected_element",
+     [(3.14159, None, 3.14159), (3.14159, bool, True), (True, None, True), (True, float, 1)],
+     ids=IDS_REPEAT_SINGLE
+ )
+ def test_repeat_single(value, dtype, expected_element):
+     SHAPE = (3, 3)
+     EXPECTED = array([
+         [expected_element, expected_element, expected_element],
+         [expected_element, expected_element, expected_element],
+         [expected_element, expected_element, expected_element]
+     ])
+     result = repeat_single(SHAPE, value, dtype=dtype)
+     assert_array_equal(result, EXPECTED)
+
+
+ def test_repeat_array_as_columns():
+     INPUT = array([
+         [1, 2, 3],
+         [4, 5, 6]
+     ])
+     EXPECTED = array([
+         [1, 2, 3, 1, 2, 3],
+         [4, 5, 6, 4, 5, 6]
+     ])
+     result = repeat_array_as_columns(2, INPUT)
+     assert_array_equal(result, EXPECTED)
+
+
+ def test_repeat_array_as_rows():
+     INPUT = array([
+         [1, 2, 3],
+         [4, 5, 6]
+     ])
+     EXPECTED = array([
+         [1, 2, 3],
+         [4, 5, 6],
+         [1, 2, 3],
+         [4, 5, 6]
+     ])
+     result = repeat_array_as_rows(2, INPUT)
+     assert_array_equal(result, EXPECTED)
+
+
+ def test_repeat_1d_array_as_columns():
+     INPUT = array([1, 2, 3])
+     EXPECTED = array([
+         [1, 1, 1],
+         [2, 2, 2],
+         [3, 3, 3]
+     ])
+     result = repeat_1d_array_as_columns(3, INPUT)
+     assert_array_equal(result, EXPECTED)
+
+
+ def test_discrete_uniform_1d():
+     MIN, MAX = -100, 100
+     result = discrete_uniform_1d(SHAPE, MIN, MAX, seed=SEED)
+     assert_rows_identical(result)
+     assert_elements_between(result, MIN, MAX)
+     assert result.shape == SHAPE
+
+
+ def test_discrete_uniform_2d():
+     MIN, MAX = -100, 100
+     result = discrete_uniform_2d(SHAPE, MIN, MAX, seed=SEED)
+     assert_rows_unique(result)
+     assert_elements_between(result, MIN, MAX)
+     assert result.shape == SHAPE
+
+
+ def test_discrete_triangular_1d():
+     LOW, HIGH = -100, 100
+     MODE = -50
+     result = triangular_1d(SHAPE, LOW, HIGH, MODE, seed=SEED)
+     assert_rows_identical(result)
+     assert_elements_between(result, LOW, HIGH)
+     assert result.shape == SHAPE
+
+
+ def test_discrete_triangular_2d():
+     LOW, HIGH = -100, 100
+     MODE = 50
+     result = triangular_2d(SHAPE, LOW, HIGH, MODE, seed=SEED)
+     assert_rows_unique(result)
+     assert_elements_between(result, LOW, HIGH)
+     assert result.shape == SHAPE
+
+
+ def test_normal_1d():
+     MEAN = 0
+     SD = 50
+     result = normal_1d(SHAPE, MEAN, SD, seed=SEED)
+     assert_rows_identical(result)
+     assert result.shape == SHAPE
+
+
+ def test_normal_2d():
+     MEAN = 0
+     SD = 50
+     result = normal_2d(SHAPE, MEAN, SD, seed=SEED)
+     assert_rows_unique(result)
+     assert result.shape == SHAPE
+
+
+ def test_truncated_normal_1d():
+     MEAN = 0
+     SD = 50
+     LOW, HIGH = -50, 50
+     result = truncated_normal_1d(SHAPE, MEAN, SD, LOW, HIGH, seed=SEED)
+     assert_rows_identical(result)
+     assert_elements_between(result, LOW, HIGH)
+     assert result.shape == SHAPE
+
+
+ def test_truncated_normal_2d():
+     MEAN = 0
+     SD = 50
+     LOW, HIGH = -50, 50
+     result = truncated_normal_2d(SHAPE, MEAN, SD, LOW, HIGH, seed=SEED)
+     assert_rows_unique(result)
+     assert_elements_between(result, LOW, HIGH)
+     assert result.shape == SHAPE
+
+
+ def test_plus_minus_uncertainty_to_normal_1d():
+     MEAN = 10
+     UNCERTAINTY = 10
+     CONFIDENCE_INTERVAL = 95
+     result = plus_minus_uncertainty_to_normal_1d(SHAPE, MEAN, UNCERTAINTY, CONFIDENCE_INTERVAL)
+     assert_rows_identical(result)
+     assert result.shape == SHAPE
+
+
+ def test_plus_minus_uncertainty_to_normal_2d():
+     MEAN = 10
+     UNCERTAINTY = 10
+     CONFIDENCE_INTERVAL = 95
+     result = plus_minus_uncertainty_to_normal_2d(SHAPE, MEAN, UNCERTAINTY, CONFIDENCE_INTERVAL)
+     assert_rows_unique(result)
+     assert result.shape == SHAPE
+
+
+ def test_grouped_avg():
+     INPUT = array([
+         [1, 2, 3],
+         [4, 5, 6],
+         [7, 8, 9],
+         [10, 11, 12],
+         [13, 14, 15],
+         [16, 17, 18]
+     ])
+     EXPECTED = array([
+         [4, 5, 6],
+         [13, 14, 15]
+     ])
+     result = grouped_avg(INPUT, n=3)
+     assert_array_equal(result, EXPECTED)
+
+
+ def test_avg_run_in_columnwise():
+     INPUT = array([
+         [1, 2, 3],
+         [4, 5, 6],
+         [7, 8, 9],
+         [10, 11, 12],
+         [13, 14, 15],
+         [16, 17, 18]
+     ])
+     EXPECTED = array([
+         [4, 5, 6],
+         [10, 11, 12],
+         [13, 14, 15],
+         [16, 17, 18]
+     ])
+     result = avg_run_in_columnwise(INPUT, n=3)
+     assert_array_equal(result, EXPECTED)
+
+
+ def test_avg_run_in_rowwise():
+     INPUT = array([
+         [1, 2, 3, 4, 5],
+         [6, 7, 8, 9, 10],
+         [11, 12, 13, 14, 15]
+     ])
+     EXPECTED = array([
+         [2, 4, 5],
+         [7, 9, 10],
+         [12, 14, 15]
+     ])
+     result = avg_run_in_rowwise(INPUT, n=3)
+     assert_array_equal(result, EXPECTED)
+
+
+ def test_gen_seed():
+     NODE = {"@id": "site"}
+     EXPECTED = 2926675914
+     result = gen_seed(NODE)
+     assert result == EXPECTED
+
+
+ def test_gen_seed_no_id():
+     NODE = {}
+     EXPECTED = 2140941220
+     result = gen_seed(NODE)
+     assert result == EXPECTED
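Several of the new builders are deterministic reshaping helpers whose behaviour can be read straight off the fixtures. The numpy sketch below shows plausible implementations for three of them, inferred from the tests above; it is not the actual array_builders module, and the real signatures may differ.

import numpy as np
from numpy.typing import NDArray


def repeat_array_as_columns(n_repeats: int, arr: NDArray) -> NDArray:
    # Tile horizontally so the array's columns repeat n_repeats times.
    return np.tile(arr, (1, n_repeats))


def grouped_avg(arr: NDArray, n: int = 1) -> NDArray:
    # Average consecutive groups of n rows into a single row each.
    return arr.reshape(-1, n, arr.shape[1]).mean(axis=1)


def avg_run_in_rowwise(arr: NDArray, n: int = 1) -> NDArray:
    # Collapse the first n columns of each row ("run-in" period) into their mean.
    return np.hstack([arr[:, :n].mean(axis=1, keepdims=True), arr[:, n:]])


# Same fixture as test_grouped_avg: six rows averaged in groups of three.
data = np.arange(1, 19).reshape(6, 3)
assert np.array_equal(grouped_avg(data, n=3), np.array([[4, 5, 6], [13, 14, 15]]))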
@@ -21,7 +21,8 @@ from hestia_earth.models.utils.blank_node import (
      get_node_value,
      group_nodes_by_year,
      group_nodes_by_year_and_month,
-     GroupNodesByYearMode
+     GroupNodesByYearMode,
+     split_node_by_dates
  )


@@ -1017,33 +1018,67 @@ def test_group_nodes_by_year_multiple_values_and_dates(mock_parse, system_datetime):
              "2001-02",
              "2002-03",
              "2003-01"
+         ],
+         "sd": [
+             0.8, 0.9, 1.0, 0.9, 0.8
+         ],
+         "observations": [
+             100, 100, 100, 100, 100
          ]
      }]

      EXPECTED = {
-         2000: [{
-             "dates": ["2000-01", "2000-06"],
-             "fraction_of_node_duration": 0.32475598935226263,
-             "fraction_of_group_duration": 1.0,
-             "value": [1, 2]
-         }],
+         2000: [
+             {
+                 "dates": ["2000-01"],
+                 "fraction_of_node_duration": 1.0,
+                 "fraction_of_group_duration": 0.08469945355191257,
+                 "value": [1],
+                 "sd": [0.8],
+                 "observations": [
+                     100
+                 ]
+             },
+             {
+                 "dates": ["2000-06"],
+                 "fraction_of_node_duration": 1.0,
+                 "fraction_of_group_duration": 0.08196721311475409,
+                 "value": [2],
+                 "sd": [0.9],
+                 "observations": [
+                     100
+                 ]
+             }
+         ],
          2001: [{
              "dates": ["2001-02"],
-             "fraction_of_node_duration": 0.323868677905945,
-             "fraction_of_group_duration": 1.0,
-             "value": [3]
+             "fraction_of_node_duration": 1.0,
+             "fraction_of_group_duration": 0.07671232876712329,
+             "value": [3],
+             "sd": [1.0],
+             "observations": [
+                 100
+             ]
          }],
          2002: [{
              "dates": ["2002-03"],
-             "fraction_of_node_duration": 0.323868677905945,
-             "fraction_of_group_duration": 1.0,
-             "value": [4]
+             "fraction_of_node_duration": 1.0,
+             "fraction_of_group_duration": 0.08493150684931507,
+             "value": [4],
+             "sd": [0.9],
+             "observations": [
+                 100
+             ]
          }],
          2003: [{
              "dates": ["2003-01"],
-             "fraction_of_node_duration": 0.027506654835847383,
+             "fraction_of_node_duration": 1.0,
              "fraction_of_group_duration": 0.08493150684931507,
-             "value": [5]
+             "value": [5],
+             "sd": [0.8],
+             "observations": [
+                 100
+             ]
          }]
      }
 
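The new expected fractions look like plain calendar ratios: each dated value is now treated as spanning exactly one month, so fraction_of_node_duration is 1.0 and fraction_of_group_duration is that month's share of its year. This is an inference from the fixture values, not documented behaviour; the arithmetic checks out as follows.

# Inferred from the fixtures: days-in-month / days-in-year for each dated value.
assert abs(31 / 366 - 0.08469945355191257) < 1e-15  # Jan 2000 (leap year)
assert abs(30 / 366 - 0.08196721311475409) < 1e-15  # Jun 2000
assert abs(28 / 365 - 0.07671232876712329) < 1e-15  # Feb 2001
assert abs(31 / 365 - 0.08493150684931507) < 1e-15  # Mar 2002 and Jan 2003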
@@ -1134,3 +1169,107 @@ def test_group_nodes_by_year_and_month(mock_parse, system_datetime):

      result = group_nodes_by_year_and_month(MANAGEMENT)
      assert result == EXPECTED
+
+
+ # node, expected
+ PARAMS_SPLIT_NODE = [
+     (
+         {},
+         [{}]
+     ),
+     (
+         {"value": [1, 2, 3], "dates": ["2000"]},
+         [{"value": [1, 2, 3], "dates": ["2000"]}]
+     ),
+     (
+         {"value": [1, 2, 3], "startDate": "2000", "endDate": "2001"},
+         [{"value": [1, 2, 3], "startDate": "2000", "endDate": "2001"}]
+     ),
+     (
+         {"value": 1, "startDate": "2000", "endDate": "2001"},
+         [{"value": 1, "startDate": "2000", "endDate": "2001"}]
+     ),
+     (
+         {"value": None},
+         [{"value": None}]
+     ),
+     (
+         {"value": [1, 2, 3], "dates": ["2000", "2001", "2002"]},
+         [
+             {"value": [1], "dates": ["2000"]},
+             {"value": [2], "dates": ["2001"]},
+             {"value": [3], "dates": ["2002"]}
+         ]
+     ),
+     (
+         {
+             "value": [1, 2],
+             "dates": ["2000", "2001"],
+             "sd": [0.816496, 0.816496],
+             "min": [0, 1],
+             "max": [2, 3],
+             "observations": [3, 3]
+         },
+         [
+             {
+                 "value": [1],
+                 "dates": ["2000"],
+                 "sd": [0.816496],
+                 "min": [0],
+                 "max": [2],
+                 "observations": [3]
+             },
+             {
+                 "value": [2],
+                 "dates": ["2001"],
+                 "sd": [0.816496],
+                 "min": [1],
+                 "max": [3],
+                 "observations": [3]
+             }
+         ]
+     ),
+     (
+         {
+             "value": [1, 2],
+             "dates": ["2000", "2001"],
+             "sd": [0.816496, 0.816496],
+             "min": [0, 1],
+             "max": [2, 3],
+             "observations": [3]
+         },
+         [
+             {
+                 "value": [1],
+                 "dates": ["2000"],
+                 "sd": [0.816496],
+                 "min": [0],
+                 "max": [2],
+                 "observations": [3]
+             },
+             {
+                 "value": [2],
+                 "dates": ["2001"],
+                 "sd": [0.816496],
+                 "min": [1],
+                 "max": [3],
+                 "observations": [3]
+             }
+         ]
+     )
+ ]
+ IDS_SPLIT_NODE = [
+     "no split -> empty node",
+     "no split -> not enough dates",  # len(value) and len(dates) MUST match
+     "no split -> startDate & endDate",
+     "no split -> non-iterable value",  # i.e., on a Management or Animal node.
+     "no split -> null value",  # i.e., on a Animal node where value is not required.
+     "value & dates",
+     "descriptive statistics",
+     "descriptive statistics w/ bad key"  # if descriptive statistic keys have wrong length, don't split them
+ ]
+
+
+ @mark.parametrize("node, expected", PARAMS_SPLIT_NODE, ids=IDS_SPLIT_NODE)
+ def test_split_node_by_dates(node, expected):
+     assert split_node_by_dates(node) == expected
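Read together, the cases imply that a node is split only when value and dates are equal-length lists with more than one entry, that descriptive-statistic lists of matching length are split alongside them, and that mismatched keys are copied through unchanged. The sketch below is a hypothetical implementation consistent with these cases, not the actual blank_node code; the KEYS_TO_SPLIT constant is illustrative.

# Keys that are split element-wise when their length matches len(dates) (illustrative list).
KEYS_TO_SPLIT = ("value", "dates", "sd", "min", "max", "observations")


def split_node_by_dates(node: dict) -> list:
    value, dates = node.get("value"), node.get("dates", [])
    # Split only when value and dates are equal-length lists with more than one entry.
    should_split = isinstance(value, list) and len(value) == len(dates) and len(dates) > 1
    return [
        {
            **node,
            # Split any list-valued key whose length matches; copy mismatched keys as-is.
            **{
                key: [node[key][i]]
                for key in KEYS_TO_SPLIT
                if isinstance(node.get(key), list) and len(node[key]) == len(dates)
            }
        }
        for i in range(len(dates))
    ] if should_split else [node]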