openforis-whisp 2.0.0a4__py3-none-any.whl → 2.0.0a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openforis_whisp/risk.py CHANGED
@@ -1,777 +1,771 @@
1
- import pandas as pd
2
-
3
- from .pd_schemas import data_lookup_type
4
-
5
-
6
- from openforis_whisp.parameters.config_runtime import (
7
- geometry_area_column,
8
- DEFAULT_GEE_DATASETS_LOOKUP_TABLE_PATH,
9
- stats_unit_type_column, # Add this import
10
- )
11
-
12
- from openforis_whisp.reformat import filter_lookup_by_country_codes
13
-
14
- # could embed this in each function below that uses lookup_gee_datasets_df.
15
- lookup_gee_datasets_df: data_lookup_type = pd.read_csv(
16
- DEFAULT_GEE_DATASETS_LOOKUP_TABLE_PATH
17
- )
18
-
19
-
20
- # requires lookup_gee_datasets_df
21
-
22
-
23
- # Add function to detect unit type from dataframe
24
- def detect_unit_type(df, explicit_unit_type=None):
25
- """
26
- Determine the unit type from the dataframe or use the override value.
27
-
28
- Args:
29
- df (DataFrame): Input DataFrame.
30
- explicit_unit_type (str, optional): Override unit type ('ha' or 'percent').
31
-
32
- Returns:
33
- str: The unit type to use for calculations.
34
-
35
- Raises:
36
- ValueError: If the unit type can't be determined and no override is provided,
37
- or if there are mixed unit types in the dataframe.
38
- """
39
- # If override is provided, use it
40
- if explicit_unit_type is not None:
41
- if explicit_unit_type not in ["ha", "percent"]:
42
- raise ValueError(
43
- f"Invalid unit type: {explicit_unit_type}. Must be 'ha' or 'percent'."
44
- )
45
- return explicit_unit_type
46
-
47
- # Check if unit type column exists in the dataframe
48
- if stats_unit_type_column not in df.columns:
49
- raise ValueError(
50
- f"Column '{stats_unit_type_column}' not found in dataframe. "
51
- "Please provide 'explicit_unit_type' parameter to specify the unit type."
52
- )
53
-
54
- # Get unique values from the column
55
- unit_types = df[stats_unit_type_column].unique()
56
-
57
- # Check for mixed unit types
58
- if len(unit_types) > 1:
59
- raise ValueError(
60
- f"Mixed unit types in dataframe: {unit_types}. All rows must use the same unit type."
61
- )
62
-
63
- # Get the single unit type
64
- unit_type = unit_types[0]
65
-
66
- # Validate that the unit type is recognized
67
- if unit_type not in ["ha", "percent"]:
68
- raise ValueError(
69
- f"Unrecognized unit type: {unit_type}. Must be 'ha' or 'percent'."
70
- )
71
-
72
- return unit_type
73
-
74
-
75
- # Update whisp_risk to accept and pass the unit_type parameter
76
- def whisp_risk(
77
- df: data_lookup_type, # CHECK THIS
78
- ind_1_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
79
- ind_2_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
80
- ind_3_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
81
- ind_4_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
82
- ind_5_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
83
- ind_6_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
84
- ind_7_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
85
- ind_8_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
86
- ind_9_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
87
- ind_10_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
88
- ind_11_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
89
- ind_1_input_columns: pd.Series = None, # see lookup_gee_datasets for details
90
- ind_2_input_columns: pd.Series = None, # see lookup_gee_datasets for details
91
- ind_3_input_columns: pd.Series = None, # see lookup_gee_datasets for details
92
- ind_4_input_columns: pd.Series = None, # see lookup_gee_datasets for details
93
- ind_5_input_columns: pd.Series = None, # see lookup_gee_datasets for details
94
- ind_6_input_columns: pd.Series = None, # see lookup_gee_datasets for details
95
- ind_7_input_columns: pd.Series = None, # see lookup_gee_datasets for details
96
- ind_8_input_columns: pd.Series = None, # see lookup_gee_datasets for details
97
- ind_9_input_columns: pd.Series = None, # see lookup_gee_datasets for details
98
- ind_10_input_columns: pd.Series = None, # see lookup_gee_datasets for details
99
- ind_11_input_columns: pd.Series = None, # see lookup_gee_datasets for details
100
- ind_1_name: str = "Ind_01_treecover",
101
- ind_2_name: str = "Ind_02_commodities",
102
- ind_3_name: str = "Ind_03_disturbance_before_2020",
103
- ind_4_name: str = "Ind_04_disturbance_after_2020",
104
- ind_5_name: str = "Ind_05_primary_2020",
105
- ind_6_name: str = "Ind_06_nat_reg_forest_2020",
106
- ind_7_name: str = "Ind_07_planted_plantations_2020",
107
- ind_8_name: str = "Ind_08_planted_plantations_after_2020",
108
- ind_9_name: str = "Ind_09_treecover_after_2020",
109
- ind_10_name: str = "Ind_10_agri_after_2020",
110
- ind_11_name: str = "Ind_11_logging_concession_before_2020",
111
- low_name: str = "no",
112
- high_name: str = "yes",
113
- explicit_unit_type: str = None,
114
- national_codes: list[str] = None, # List of ISO2 country codes to filter by
115
- ) -> data_lookup_type:
116
- """
117
- Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.
118
-
119
- Args:
120
- df (DataFrame): Input DataFrame.
121
- ind_1_pcent_threshold (int, optional): Percentage threshold for the first indicator. Defaults to 10.
122
- ind_2_pcent_threshold (int, optional): Percentage threshold for the second indicator. Defaults to 10.
123
- ind_3_pcent_threshold (int, optional): Percentage threshold for the third indicator. Defaults to 10.
124
- ind_4_pcent_threshold (int, optional): Percentage threshold for the fourth indicator. Defaults to 10.
125
- ind_1_input_columns (list, optional): List of input columns for the first indicator. Defaults to columns for the treecover theme.
126
- ind_2_input_columns (list, optional): List of input columns for the second indicator. Defaults to columns for the commodities theme.
127
- ind_3_input_columns (list, optional): List of input columns for the third indicator. Defaults to columns for disturbance before 2020.
128
- ind_4_input_columns (list, optional): List of input columns for the fourth indicator. Defaults to columns for disturbance after 2020.
129
- ind_1_name (str, optional): Name of the first indicator column. Defaults to "Ind_01_treecover".
130
- ind_2_name (str, optional): Name of the second indicator column. Defaults to "Ind_02_commodities".
131
- ind_3_name (str, optional): Name of the third indicator column. Defaults to "Ind_03_disturbance_before_2020".
132
- ind_4_name (str, optional): Name of the fourth indicator column. Defaults to "Ind_04_disturbance_after_2020".
133
- low_name (str, optional): Value shown in table if less than or equal to the threshold. Defaults to "no".
134
- high_name (str, optional): Value shown in table if more than the threshold. Defaults to "yes".
135
- explicit_unit_type (str, optional): Override the autodetected unit type ('ha' or 'percent').
136
- If not provided, will detect from dataframe 'unit' column.
137
-
138
- Returns:
139
- data_lookup_type: DataFrame with added 'risk_pcrop', 'risk_acrop' and 'risk_timber' columns.
140
- """
141
- # Determine the unit type to use based on input data and override
142
- unit_type = detect_unit_type(df, explicit_unit_type)
143
-
144
- print(f"Using unit type: {unit_type}")
145
-
146
- lookup_df_copy = lookup_gee_datasets_df.copy()
147
-
148
- # filter by national codes (even if None - this removes all country columns unless specified)
149
- filtered_lookup_gee_datasets_df = filter_lookup_by_country_codes(
150
- lookup_df=lookup_df_copy,
151
- filter_col="ISO2_code",
152
- national_codes=national_codes,
153
- )
154
-
155
- # Rest of the function remains the same, but pass unit_type to add_indicators
156
- if ind_1_input_columns is None:
157
- ind_1_input_columns = get_cols_ind_01_treecover(filtered_lookup_gee_datasets_df)
158
- if ind_2_input_columns is None:
159
- ind_2_input_columns = get_cols_ind_02_commodities(
160
- filtered_lookup_gee_datasets_df
161
- )
162
- if ind_3_input_columns is None:
163
- ind_3_input_columns = get_cols_ind_03_dist_before_2020(
164
- filtered_lookup_gee_datasets_df
165
- )
166
- if ind_4_input_columns is None:
167
- ind_4_input_columns = get_cols_ind_04_dist_after_2020(
168
- filtered_lookup_gee_datasets_df
169
- )
170
- if ind_5_input_columns is None:
171
- ind_5_input_columns = get_cols_ind_05_primary_2020(
172
- filtered_lookup_gee_datasets_df
173
- )
174
- if ind_6_input_columns is None:
175
- ind_6_input_columns = get_cols_ind_06_nat_reg_2020(
176
- filtered_lookup_gee_datasets_df
177
- )
178
- if ind_7_input_columns is None:
179
- ind_7_input_columns = get_cols_ind_07_planted_2020(
180
- filtered_lookup_gee_datasets_df
181
- )
182
- if ind_8_input_columns is None:
183
- ind_8_input_columns = get_cols_ind_08_planted_after_2020(
184
- filtered_lookup_gee_datasets_df
185
- )
186
- if ind_9_input_columns is None:
187
- ind_9_input_columns = get_cols_ind_09_treecover_after_2020(
188
- filtered_lookup_gee_datasets_df
189
- )
190
- if ind_10_input_columns is None:
191
- ind_10_input_columns = get_cols_ind_10_agri_after_2020(
192
- filtered_lookup_gee_datasets_df
193
- )
194
- if ind_11_input_columns is None:
195
- ind_11_input_columns = get_cols_ind_11_logging_before_2020(
196
- filtered_lookup_gee_datasets_df
197
- )
198
-
199
- # Check range of values
200
- check_range(ind_1_pcent_threshold)
201
- check_range(ind_2_pcent_threshold)
202
- check_range(ind_3_pcent_threshold)
203
- check_range(ind_4_pcent_threshold)
204
- check_range(ind_5_pcent_threshold)
205
- check_range(ind_6_pcent_threshold)
206
- check_range(ind_7_pcent_threshold)
207
- check_range(ind_8_pcent_threshold)
208
- check_range(ind_9_pcent_threshold)
209
- check_range(ind_10_pcent_threshold)
210
- check_range(ind_11_pcent_threshold)
211
-
212
- input_cols = [
213
- ind_1_input_columns,
214
- ind_2_input_columns,
215
- ind_3_input_columns,
216
- ind_4_input_columns,
217
- ind_5_input_columns,
218
- ind_6_input_columns,
219
- ind_7_input_columns,
220
- ind_8_input_columns,
221
- ind_9_input_columns,
222
- ind_10_input_columns,
223
- ind_11_input_columns,
224
- ]
225
- thresholds = [
226
- ind_1_pcent_threshold,
227
- ind_2_pcent_threshold,
228
- ind_3_pcent_threshold,
229
- ind_4_pcent_threshold,
230
- ind_5_pcent_threshold,
231
- ind_6_pcent_threshold,
232
- ind_7_pcent_threshold,
233
- ind_8_pcent_threshold,
234
- ind_9_pcent_threshold,
235
- ind_10_pcent_threshold,
236
- ind_11_pcent_threshold,
237
- ]
238
- names = [
239
- ind_1_name,
240
- ind_2_name,
241
- ind_3_name,
242
- ind_4_name,
243
- ind_5_name,
244
- ind_6_name,
245
- ind_7_name,
246
- ind_8_name,
247
- ind_9_name,
248
- ind_10_name,
249
- ind_11_name,
250
- ]
251
- [check_range(threshold) for threshold in thresholds]
252
-
253
- df_w_indicators = add_indicators(
254
- df,
255
- input_cols,
256
- thresholds,
257
- names,
258
- low_name,
259
- high_name,
260
- unit_type, # Pass the unit type
261
- )
262
-
263
- df_w_indicators_and_risk_pcrop = add_eudr_risk_pcrop_col(
264
- df=df_w_indicators,
265
- ind_1_name=ind_1_name,
266
- ind_2_name=ind_2_name,
267
- ind_3_name=ind_3_name,
268
- ind_4_name=ind_4_name,
269
- )
270
-
271
- df_w_indicators_and_risk_acrop = add_eudr_risk_acrop_col(
272
- df=df_w_indicators,
273
- ind_1_name=ind_1_name,
274
- ind_2_name=ind_2_name,
275
- ind_3_name=ind_3_name,
276
- ind_4_name=ind_4_name,
277
- )
278
-
279
- df_w_indicators_and_risk_timber = add_eudr_risk_timber_col(
280
- df=df_w_indicators,
281
- ind_1_name=ind_1_name,
282
- ind_2_name=ind_2_name,
283
- ind_3_name=ind_3_name,
284
- ind_4_name=ind_4_name,
285
- ind_5_name=ind_5_name,
286
- ind_6_name=ind_6_name,
287
- ind_7_name=ind_7_name,
288
- ind_8_name=ind_8_name,
289
- ind_9_name=ind_9_name,
290
- ind_10_name=ind_10_name,
291
- ind_11_name=ind_11_name,
292
- )
293
-
294
- return df_w_indicators_and_risk_timber
295
-
296
-
297
- def add_eudr_risk_pcrop_col(
298
- df: data_lookup_type,
299
- ind_1_name: str,
300
- ind_2_name: str,
301
- ind_3_name: str,
302
- ind_4_name: str,
303
- ) -> data_lookup_type:
304
- """
305
- Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.
306
-
307
- Args:
308
- df (DataFrame): Input DataFrame.
309
- ind_1_name (str): Name of first indicator column.
310
- ind_2_name (str): Name of second indicator column.
311
- ind_3_name (str): Name of third indicator column.
312
- ind_4_name (str): Name of fourth indicator column.
313
-
314
- Returns:
315
- DataFrame: DataFrame with added 'risk_pcrop' column.
316
- """
317
-
318
- for index, row in df.iterrows():
319
- # If any of the first three indicators suggest low risk, set EUDR_risk to "low"
320
- if (
321
- row[ind_1_name] == "no"
322
- or row[ind_2_name] == "yes"
323
- or row[ind_3_name] == "yes"
324
- ):
325
- df.at[index, "risk_pcrop"] = "low"
326
- # If none of the first three indicators suggest low risk and Indicator 4 suggests no risk, set EUDR_risk to "more_info_needed"
327
- elif row[ind_4_name] == "no":
328
- df.at[index, "risk_pcrop"] = "more_info_needed"
329
- # If none of the above conditions are met, set EUDR_risk to "high"
330
- else:
331
- df.at[index, "risk_pcrop"] = "high"
332
-
333
- return df
334
-
335
-
336
- def add_eudr_risk_acrop_col(
337
- df: data_lookup_type,
338
- ind_1_name: str,
339
- ind_2_name: str,
340
- ind_3_name: str,
341
- ind_4_name: str,
342
- ) -> data_lookup_type:
343
- """
344
- Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.
345
-
346
- Args:
347
- df (DataFrame): Input DataFrame.
348
- ind_1_name (str, optional): Name of first indicator column. Defaults to "Indicator_1_treecover".
349
- ind_2_name (str, optional): Name of second indicator column. Defaults to "Indicator_2_commodities".
350
- ind_3_name (str, optional): Name of third indicator column. Defaults to "Indicator_3_disturbance_before_2020".
351
- ind_4_name (str, optional): Name of fourth indicator column. Defaults to "Indicator_4_disturbance_after_2020".
352
-
353
- Returns:
354
- DataFrame: DataFrame with added 'risk_acrop' column.
355
- """
356
-
357
- # soy risk
358
- for index, row in df.iterrows():
359
- # If there is no tree cover in 2020, set EUDR_risk_soy to "low"
360
- if row[ind_1_name] == "no" or row[ind_2_name] == "yes":
361
- df.at[index, "risk_acrop"] = "low"
362
- # If there is tree cover in 2020 and disturbances post 2020, set EUDR_risk_soy to "high"
363
- elif row[ind_1_name] == "yes" and row[ind_4_name] == "yes":
364
- df.at[index, "risk_acrop"] = "high"
365
- # If tree cover and no disturbances post 2020, set EUDR_risk to "more_info_needed"
366
- else:
367
- df.at[index, "risk_acrop"] = "more_info_needed"
368
-
369
- return df
370
-
371
-
372
- def add_eudr_risk_timber_col(
373
- df: data_lookup_type,
374
- ind_1_name: str,
375
- ind_2_name: str,
376
- ind_3_name: str,
377
- ind_4_name: str,
378
- ind_5_name: str,
379
- ind_6_name: str,
380
- ind_7_name: str,
381
- ind_8_name: str,
382
- ind_9_name: str,
383
- ind_10_name: str,
384
- ind_11_name: str,
385
- ) -> data_lookup_type:
386
- """
387
- Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.
388
-
389
- Args:
390
- df (DataFrame): Input DataFrame.
391
- ind_1_name (str, optional): Name of first indicator column. Defaults to "Indicator_1_treecover".
392
- ind_2_name (str, optional): Name of second indicator column. Defaults to "Indicator_2_commodities".
393
- ind_3_name (str, optional): Name of third indicator column. Defaults to "Indicator_3_disturbance_before_2020".
394
- ind_4_name (str, optional): Name of fourth indicator column. Defaults to "Indicator_4_disturbance_after_2020".
395
-
396
- Returns:
397
- DataFrame: DataFrame with added 'risk_timber' column.
398
- """
399
-
400
- for index, row in df.iterrows():
401
- # If there is a commodity in 2020 OR if there is planted-plantation in 2020 AND no agriculture in 2023, set EUDR_risk_degrad to "low"
402
- if row[ind_2_name] == "yes" or (
403
- row[ind_7_name] == "yes" and row[ind_10_name] == "no"
404
- ):
405
- df.at[index, "risk_timber"] = "low"
406
- # If there is no tree cover, set EUDR_risk_degrad to "low"? no because of unstocked forests
407
- # if row[ind_1_name] == "no" or row[ind_3_name] == "yes" or row[ind_7_name] == "yes":
408
- # df.at[index, 'EUDR_risk_degrad'] = "low"
409
- # If primary or naturally regenerating or planted forest in 2020 AND agricultural use in 2023, set EUDR_risk to high
410
- elif (
411
- row[ind_5_name] == "yes"
412
- or row[ind_6_name] == "yes"
413
- or row[ind_7_name] == "yes"
414
- ) and row[ind_10_name] == "yes":
415
- df.at[index, "risk_timber"] = "high"
416
- # If primary or naturally regenerating AND planted post 2020, set EUDR_risk to "high"
417
- elif (row[ind_5_name] == "yes" or row[ind_6_name] == "yes") and row[
418
- ind_8_name
419
- ] == "yes":
420
- df.at[index, "risk_timber"] = "high"
421
- # If primary or naturally regenerating or planted forest in 2020 and OWL in 2023, set EUDR_risk to high
422
- # elif (row[ind_5_name] == "yes" or row[ind_6_name] == "yes" or row[ind_7_name] == "yes") and row[ind_10_name] == "yes":
423
- # df.at[index, 'EUDR_risk_timber'] = "high"
424
-
425
- # If primary forest OR naturally regenerating AND an information on management practice OR tree cover post 2020, set EUDR_risk_degrad to "low"
426
- elif (row[ind_5_name] == "yes" or row[ind_6_name] == "yes") and (
427
- row[ind_9_name] == "yes" or row[ind_11_name] == "yes"
428
- ):
429
- df.at[index, "risk_timber"] = "low"
430
- # If primary or naturally regenerating and no other info, set EUDR_risk to "more_info_needed"
431
- elif row[ind_5_name] == "yes" or row[ind_6_name] == "yes":
432
- df.at[index, "risk_timber"] = "more_info_needed"
433
- # If none of the above conditions are met, set EUDR_risk to "high"
434
- else:
435
- df.at[index, "risk_timber"] = "high"
436
-
437
- return df
438
-
439
-
440
- def add_indicators(
441
- df: data_lookup_type,
442
- input_cols: list[str],
443
- thresholds: list[float],
444
- names: list[str],
445
- low_name: str = "no",
446
- high_name: str = "yes",
447
- unit_type: str = None,
448
- ) -> data_lookup_type:
449
- for input_col, threshold, name in zip(input_cols, thresholds, names):
450
- df = add_indicator_column(
451
- df=df,
452
- input_columns=input_col,
453
- threshold=threshold,
454
- new_column_name=name,
455
- low_name=low_name,
456
- high_name=high_name,
457
- sum_comparison=False,
458
- unit_type=unit_type, # Pass the unit type
459
- )
460
- return df
461
-
462
-
463
- # Update add_indicator_column to use the unit_type parameter
464
- def add_indicator_column(
465
- df: data_lookup_type,
466
- input_columns: list[str],
467
- threshold: float,
468
- new_column_name: str,
469
- low_name: str = "no",
470
- high_name: str = "yes",
471
- sum_comparison: bool = False,
472
- unit_type: str = None, # unit_type parameter
473
- ) -> data_lookup_type:
474
- """
475
- Add a new column to the DataFrame based on the specified columns, threshold, and comparison sign.
476
-
477
- Parameters:
478
- df (data_lookup_type): The pandas DataFrame to which the column will be added.
479
- input_columns (list): List of column names to check for threshold.
480
- threshold (float): The threshold value to compare against.
481
- new_column_name (str): The name of the new column to be added.
482
- The '>' sign is used for comparisons.
483
- When 'sum_comparison' is True, the threshold is compared to the sum of all columns listed in 'input_columns'; when False, each column in the list is compared to the threshold individually.
484
- low_name (str): The name for the value when below or equal to threshold (default is 'no').
485
- high_name (str): The name for the value when above threshold (default is 'yes').
486
- sum_comparison (bool): If True, sum all values in input_columns and compare to threshold (default is False).
487
- unit_type (str): Whether values are in "ha" or "percent".
488
-
489
- Returns:
490
- data_lookup_type: The DataFrame with the new column added.
491
- """
492
- # Create a new column and initialize with low_name
493
- new_column = pd.Series(low_name, index=df.index, name=new_column_name)
494
-
495
- # Default behavior: use '>' for single column comparison
496
- if sum_comparison:
497
- # Sum all values in specified columns and compare to threshold
498
- sum_values = df[input_columns].sum(axis=1)
499
- new_column[sum_values > threshold] = high_name
500
- else:
501
- # Check if any values in specified columns are above the threshold and update the new column accordingly
502
- for col in input_columns:
503
- # So that the threshold is always in percent: if outputs are in ha, the code converts to percent (by dividing by the geometry_area_column column).
504
- # Clamping is needed due to differences in decimal places (meaning input values may go just over 100)
505
- if unit_type == "ha":
506
- df[geometry_area_column] = pd.to_numeric(
507
- df[geometry_area_column], errors="coerce"
508
- )
509
- val_to_check = clamp(
510
- ((df[col] / df[geometry_area_column]) * 100), 0, 100
511
- )
512
- else:
513
- val_to_check = df[col]
514
- new_column[val_to_check > threshold] = high_name
515
-
516
- # Concatenate the new column to the DataFrame
517
- df = pd.concat([df, new_column], axis=1)
518
- return df
519
-
520
-
521
- def get_cols_ind_01_treecover(lookup_gee_datasets_df):
522
- """
523
- Generate a list of dataset names for the treecover theme, excluding those marked for exclusion.
524
-
525
- Args:
526
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
527
-
528
- Returns:
529
- list: List of dataset names set to be used in the risk calculations for the treecover theme, excluding those marked for exclusion.
530
- """
531
- lookup_gee_datasets_df = lookup_gee_datasets_df[
532
- lookup_gee_datasets_df["exclude_from_output"] != 1
533
- ]
534
- return list(
535
- lookup_gee_datasets_df["name"][
536
- (lookup_gee_datasets_df["use_for_risk"] == 1)
537
- & (lookup_gee_datasets_df["theme"] == "treecover")
538
- ]
539
- )
540
-
541
-
542
- def get_cols_ind_02_commodities(lookup_gee_datasets_df):
543
- """
544
- Generate a list of dataset names for the commodities theme, excluding those marked for exclusion.
545
-
546
- Args:
547
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
548
-
549
- Returns:
550
- list: List of dataset names set to be used in the risk calculations for the commodities theme, excluding those marked for exclusion.
551
- """
552
- lookup_gee_datasets_df = lookup_gee_datasets_df[
553
- lookup_gee_datasets_df["exclude_from_output"] != 1
554
- ]
555
- return list(
556
- lookup_gee_datasets_df["name"][
557
- (lookup_gee_datasets_df["use_for_risk"] == 1)
558
- & (lookup_gee_datasets_df["theme"] == "commodities")
559
- ]
560
- )
561
-
562
-
563
- def get_cols_ind_03_dist_before_2020(lookup_gee_datasets_df):
564
- """
565
- Generate a list of dataset names for the disturbance before 2020 theme, excluding those marked for exclusion.
566
-
567
- Args:
568
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
569
-
570
- Returns:
571
- list: List of dataset names set to be used in the risk calculations for the disturbance before 2020 theme, excluding those marked for exclusion.
572
- """
573
- lookup_gee_datasets_df = lookup_gee_datasets_df[
574
- lookup_gee_datasets_df["exclude_from_output"] != 1
575
- ]
576
- return list(
577
- lookup_gee_datasets_df["name"][
578
- (lookup_gee_datasets_df["use_for_risk"] == 1)
579
- & (lookup_gee_datasets_df["theme"] == "disturbance_before")
580
- ]
581
- )
582
-
583
-
584
- def get_cols_ind_04_dist_after_2020(lookup_gee_datasets_df):
585
- """
586
- Generate a list of dataset names for the disturbance after 2020 theme, excluding those marked for exclusion.
587
-
588
- Args:
589
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
590
-
591
- Returns:
592
- list: List of dataset names set to be used in the risk calculations for the disturbance after 2020 theme, excluding those marked for exclusion.
593
- """
594
- lookup_gee_datasets_df = lookup_gee_datasets_df[
595
- lookup_gee_datasets_df["exclude_from_output"] != 1
596
- ]
597
- return list(
598
- lookup_gee_datasets_df["name"][
599
- (lookup_gee_datasets_df["use_for_risk"] == 1)
600
- & (lookup_gee_datasets_df["theme"] == "disturbance_after")
601
- ]
602
- )
603
-
604
-
605
- def get_cols_ind_05_primary_2020(lookup_gee_datasets_df):
606
- """
607
- Generate a list of dataset names for primary forests in 2020
608
-
609
- Args:
610
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
611
-
612
- Returns:
613
- list: List of dataset names set to be used in the risk calculations for the degradation - primary forest in 2020, excluding those marked for exclusion.
614
- """
615
- lookup_gee_datasets_df = lookup_gee_datasets_df[
616
- lookup_gee_datasets_df["exclude_from_output"] != 1
617
- ]
618
- return list(
619
- lookup_gee_datasets_df["name"][
620
- (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
621
- & (lookup_gee_datasets_df["theme_timber"] == "primary")
622
- ]
623
- )
624
-
625
-
626
- def get_cols_ind_06_nat_reg_2020(lookup_gee_datasets_df):
627
- """
628
- Generate a list of dataset names for naturally_reg_2020 forests in 2020
629
-
630
- Args:
631
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
632
-
633
- Returns:
634
- list: List of dataset names set to be used in the risk calculations for the degradation - naturally_reg_2020 in 2020, excluding those marked for exclusion.
635
- """
636
- lookup_gee_datasets_df = lookup_gee_datasets_df[
637
- lookup_gee_datasets_df["exclude_from_output"] != 1
638
- ]
639
- return list(
640
- lookup_gee_datasets_df["name"][
641
- (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
642
- & (lookup_gee_datasets_df["theme_timber"] == "naturally_reg_2020")
643
- ]
644
- )
645
-
646
-
647
- def get_cols_ind_07_planted_2020(lookup_gee_datasets_df):
648
- """
649
- Generate a list of dataset names for planted and plantation forests in 2020
650
-
651
- Args:
652
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
653
-
654
- Returns:
655
- list: List of dataset names set to be used in the risk calculations for the degradation - planted and plantation forests in 2020, excluding those marked for exclusion.
656
- """
657
- lookup_gee_datasets_df = lookup_gee_datasets_df[
658
- lookup_gee_datasets_df["exclude_from_output"] != 1
659
- ]
660
- return list(
661
- lookup_gee_datasets_df["name"][
662
- (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
663
- & (lookup_gee_datasets_df["theme_timber"] == "planted_plantation_2020")
664
- ]
665
- )
666
-
667
-
668
- def get_cols_ind_08_planted_after_2020(lookup_gee_datasets_df):
669
- """
670
- Generate a list of dataset names for planted and plantation forests post 2020
671
-
672
- Args:
673
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
674
-
675
- Returns:
676
- list: List of dataset names set to be used in the risk calculations for the degradation - planted and plantation forests post 2020, excluding those marked for exclusion.
677
- """
678
- lookup_gee_datasets_df = lookup_gee_datasets_df[
679
- lookup_gee_datasets_df["exclude_from_output"] != 1
680
- ]
681
- return list(
682
- lookup_gee_datasets_df["name"][
683
- (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
684
- & (
685
- lookup_gee_datasets_df["theme_timber"]
686
- == "planted_plantation_after_2020"
687
- )
688
- ]
689
- )
690
-
691
-
692
- def get_cols_ind_09_treecover_after_2020(lookup_gee_datasets_df):
693
- """
694
- Generate a list of dataset names for treecover post 2020
695
-
696
- Args:
697
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
698
-
699
- Returns:
700
- list: List of dataset names set to be used in the risk calculations for the degradation - treecover post 2020, excluding those marked for exclusion.
701
- """
702
- lookup_gee_datasets_df = lookup_gee_datasets_df[
703
- lookup_gee_datasets_df["exclude_from_output"] != 1
704
- ]
705
- return list(
706
- lookup_gee_datasets_df["name"][
707
- (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
708
- & (lookup_gee_datasets_df["theme_timber"] == "treecover_post2020")
709
- ]
710
- )
711
-
712
-
713
- def get_cols_ind_10_agri_after_2020(lookup_gee_datasets_df):
714
- """
715
- Generate a list of dataset names for croplands post 2020
716
-
717
- Args:
718
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
719
-
720
- Returns:
721
- list: List of dataset names set to be used in the risk calculations for the degradation - croplands post 2020, excluding those marked for exclusion.
722
- """
723
- lookup_gee_datasets_df = lookup_gee_datasets_df[
724
- lookup_gee_datasets_df["exclude_from_output"] != 1
725
- ]
726
- return list(
727
- lookup_gee_datasets_df["name"][
728
- (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
729
- & (lookup_gee_datasets_df["theme_timber"] == "agri_after_2020")
730
- ]
731
- )
732
-
733
-
734
- def get_cols_ind_11_logging_before_2020(lookup_gee_datasets_df):
735
- """
736
- Generate a list of dataset names for logging concessions (2020 if available)
737
-
738
- Args:
739
- lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
740
-
741
- Returns:
742
- list: List of dataset names set to be used in the risk calculations for the degradation - logging concessions, excluding those marked for exclusion.
743
- """
744
- lookup_gee_datasets_df = lookup_gee_datasets_df[
745
- lookup_gee_datasets_df["exclude_from_output"] != 1
746
- ]
747
- return list(
748
- lookup_gee_datasets_df["name"][
749
- (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
750
- & (lookup_gee_datasets_df["theme_timber"] == "logging_concession")
751
- ]
752
- )
753
-
754
-
755
- def clamp(
756
- value: float | pd.Series, min_val: float, max_val: float
757
- ) -> float | pd.Series:
758
- """
759
- Clamp a value or a Pandas Series within a specified range.
760
-
761
- Args:
762
- value (float | pd.Series): The value or series to be clamped.
763
- min_val (float): The minimum value of the range.
764
- max_val (float): The maximum value of the range.
765
-
766
- Returns:
767
- float | pd.Series: The clamped value or series within the range.
768
- """
769
- if isinstance(value, pd.Series):
770
- return value.clip(lower=min_val, upper=max_val)
771
- else:
772
- return max(min_val, min(value, max_val))
773
-
774
-
775
- def check_range(value: float) -> None:
776
- if not (0 <= value <= 100):
777
- raise ValueError("Value must be between 0 and 100.")
1
+ import pandas as pd
2
+
3
+ from .pd_schemas import data_lookup_type
4
+
5
+
6
+ from openforis_whisp.parameters.config_runtime import (
7
+ geometry_area_column,
8
+ DEFAULT_GEE_DATASETS_LOOKUP_TABLE_PATH,
9
+ stats_unit_type_column, # Add this import
10
+ )
11
+
12
+ from openforis_whisp.reformat import filter_lookup_by_country_codes
13
+
14
+ # could embed this in each function below that uses lookup_gee_datasets_df.
15
+ lookup_gee_datasets_df: data_lookup_type = pd.read_csv(
16
+ DEFAULT_GEE_DATASETS_LOOKUP_TABLE_PATH
17
+ )
18
+
19
+
20
+ # requires lookup_gee_datasets_df
21
+
22
+
23
+ # Add function to detect unit type from dataframe
24
+ def detect_unit_type(df, explicit_unit_type=None):
25
+ """
26
+ Determine the unit type from the dataframe or use the override value.
27
+
28
+ Args:
29
+ df (DataFrame): Input DataFrame.
30
+ explicit_unit_type (str, optional): Override unit type ('ha' or 'percent').
31
+
32
+ Returns:
33
+ str: The unit type to use for calculations.
34
+
35
+ Raises:
36
+ ValueError: If the unit type can't be determined and no override is provided,
37
+ or if there are mixed unit types in the dataframe.
38
+ """
39
+ # If override is provided, use it
40
+ if explicit_unit_type is not None:
41
+ if explicit_unit_type not in ["ha", "percent"]:
42
+ raise ValueError(
43
+ f"Invalid unit type: {explicit_unit_type}. Must be 'ha' or 'percent'."
44
+ )
45
+ return explicit_unit_type
46
+
47
+ # Check if unit type column exists in the dataframe
48
+ if stats_unit_type_column not in df.columns:
49
+ raise ValueError(
50
+ f"Column '{stats_unit_type_column}' not found in dataframe. "
51
+ "Please provide 'explicit_unit_type' parameter to specify the unit type."
52
+ )
53
+
54
+ # Get unique values from the column
55
+ unit_types = df[stats_unit_type_column].unique()
56
+
57
+ # Check for mixed unit types
58
+ if len(unit_types) > 1:
59
+ raise ValueError(
60
+ f"Mixed unit types in dataframe: {unit_types}. All rows must use the same unit type."
61
+ )
62
+
63
+ # Get the single unit type
64
+ unit_type = unit_types[0]
65
+
66
+ # Validate that the unit type is recognized
67
+ if unit_type not in ["ha", "percent"]:
68
+ raise ValueError(
69
+ f"Unrecognized unit type: {unit_type}. Must be 'ha' or 'percent'."
70
+ )
71
+
72
+ return unit_type
73
+
74
+
75
+ # Update whisp_risk to accept and pass the unit_type parameter
76
+ def whisp_risk(
77
+ df: data_lookup_type, # CHECK THIS
78
+ ind_1_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
79
+ ind_2_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
80
+ ind_3_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
81
+ ind_4_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
82
+ ind_5_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
83
+ ind_6_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
84
+ ind_7_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
85
+ ind_8_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
86
+ ind_9_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
87
+ ind_10_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
88
+ ind_11_pcent_threshold: float = 10, # default values (draft decision tree and parameters)
89
+ ind_1_input_columns: pd.Series = None, # see lookup_gee_datasets for details
90
+ ind_2_input_columns: pd.Series = None, # see lookup_gee_datasets for details
91
+ ind_3_input_columns: pd.Series = None, # see lookup_gee_datasets for details
92
+ ind_4_input_columns: pd.Series = None, # see lookup_gee_datasets for details
93
+ ind_5_input_columns: pd.Series = None, # see lookup_gee_datasets for details
94
+ ind_6_input_columns: pd.Series = None, # see lookup_gee_datasets for details
95
+ ind_7_input_columns: pd.Series = None, # see lookup_gee_datasets for details
96
+ ind_8_input_columns: pd.Series = None, # see lookup_gee_datasets for details
97
+ ind_9_input_columns: pd.Series = None, # see lookup_gee_datasets for details
98
+ ind_10_input_columns: pd.Series = None, # see lookup_gee_datasets for details
99
+ ind_11_input_columns: pd.Series = None, # see lookup_gee_datasets for details
100
+ ind_1_name: str = "Ind_01_treecover",
101
+ ind_2_name: str = "Ind_02_commodities",
102
+ ind_3_name: str = "Ind_03_disturbance_before_2020",
103
+ ind_4_name: str = "Ind_04_disturbance_after_2020",
104
+ ind_5_name: str = "Ind_05_primary_2020",
105
+ ind_6_name: str = "Ind_06_nat_reg_forest_2020",
106
+ ind_7_name: str = "Ind_07_planted_plantations_2020",
107
+ ind_8_name: str = "Ind_08_planted_plantations_after_2020",
108
+ ind_9_name: str = "Ind_09_treecover_after_2020",
109
+ ind_10_name: str = "Ind_10_agri_after_2020",
110
+ ind_11_name: str = "Ind_11_logging_concession_before_2020",
111
+ low_name: str = "no",
112
+ high_name: str = "yes",
113
+ explicit_unit_type: str = None,
114
+ national_codes: list[str] = None, # List of ISO2 country codes to filter by
115
+ ) -> data_lookup_type:
116
+ """
117
+ Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.
118
+
119
+ Args:
120
+ df (DataFrame): Input DataFrame.
121
+ ind_1_pcent_threshold (int, optional): Percentage threshold for the first indicator. Defaults to 10.
122
+ ind_2_pcent_threshold (int, optional): Percentage threshold for the second indicator. Defaults to 10.
123
+ ind_3_pcent_threshold (int, optional): Percentage threshold for the third indicator. Defaults to 10.
124
+ ind_4_pcent_threshold (int, optional): Percentage threshold for the fourth indicator. Defaults to 10.
125
+ ind_1_input_columns (list, optional): List of input columns for the first indicator. Defaults to columns for the treecover theme.
126
+ ind_2_input_columns (list, optional): List of input columns for the second indicator. Defaults to columns for the commodities theme.
127
+ ind_3_input_columns (list, optional): List of input columns for the third indicator. Defaults to columns for disturbance before 2020.
128
+ ind_4_input_columns (list, optional): List of input columns for the fourth indicator. Defaults to columns for disturbance after 2020.
129
+ ind_1_name (str, optional): Name of the first indicator column. Defaults to "Ind_01_treecover".
130
+ ind_2_name (str, optional): Name of the second indicator column. Defaults to "Ind_02_commodities".
131
+ ind_3_name (str, optional): Name of the third indicator column. Defaults to "Ind_03_disturbance_before_2020".
132
+ ind_4_name (str, optional): Name of the fourth indicator column. Defaults to "Ind_04_disturbance_after_2020".
133
+ low_name (str, optional): Value shown in table if less than or equal to the threshold. Defaults to "no".
134
+ high_name (str, optional): Value shown in table if more than the threshold. Defaults to "yes".
135
+ explicit_unit_type (str, optional): Override the autodetected unit type ('ha' or 'percent').
136
+ If not provided, will detect from dataframe 'unit' column.
137
+
138
+ Returns:
139
+ data_lookup_type: DataFrame with added 'risk_pcrop', 'risk_acrop' and 'risk_timber' columns.
140
+ """
141
+ # Determine the unit type to use based on input data and override
142
+ unit_type = detect_unit_type(df, explicit_unit_type)
143
+
144
+ print(f"Using unit type: {unit_type}")
145
+
146
+ lookup_df_copy = lookup_gee_datasets_df.copy()
147
+
148
+ # filter by national codes (even if None - this removes all country columns unless specified)
149
+ filtered_lookup_gee_datasets_df = filter_lookup_by_country_codes(
150
+ lookup_df=lookup_df_copy,
151
+ filter_col="ISO2_code",
152
+ national_codes=national_codes,
153
+ )
154
+
155
+ # Rest of the function remains the same, but pass unit_type to add_indicators
156
+ if ind_1_input_columns is None:
157
+ ind_1_input_columns = get_cols_ind_01_treecover(filtered_lookup_gee_datasets_df)
158
+ if ind_2_input_columns is None:
159
+ ind_2_input_columns = get_cols_ind_02_commodities(
160
+ filtered_lookup_gee_datasets_df
161
+ )
162
+ if ind_3_input_columns is None:
163
+ ind_3_input_columns = get_cols_ind_03_dist_before_2020(
164
+ filtered_lookup_gee_datasets_df
165
+ )
166
+ if ind_4_input_columns is None:
167
+ ind_4_input_columns = get_cols_ind_04_dist_after_2020(
168
+ filtered_lookup_gee_datasets_df
169
+ )
170
+ if ind_5_input_columns is None:
171
+ ind_5_input_columns = get_cols_ind_05_primary_2020(
172
+ filtered_lookup_gee_datasets_df
173
+ )
174
+ if ind_6_input_columns is None:
175
+ ind_6_input_columns = get_cols_ind_06_nat_reg_2020(
176
+ filtered_lookup_gee_datasets_df
177
+ )
178
+ if ind_7_input_columns is None:
179
+ ind_7_input_columns = get_cols_ind_07_planted_2020(
180
+ filtered_lookup_gee_datasets_df
181
+ )
182
+ if ind_8_input_columns is None:
183
+ ind_8_input_columns = get_cols_ind_08_planted_after_2020(
184
+ filtered_lookup_gee_datasets_df
185
+ )
186
+ if ind_9_input_columns is None:
187
+ ind_9_input_columns = get_cols_ind_09_treecover_after_2020(
188
+ filtered_lookup_gee_datasets_df
189
+ )
190
+ if ind_10_input_columns is None:
191
+ ind_10_input_columns = get_cols_ind_10_agri_after_2020(
192
+ filtered_lookup_gee_datasets_df
193
+ )
194
+ if ind_11_input_columns is None:
195
+ ind_11_input_columns = get_cols_ind_11_logging_before_2020(
196
+ filtered_lookup_gee_datasets_df
197
+ )
198
+
199
+ # Check range of values
200
+ check_range(ind_1_pcent_threshold)
201
+ check_range(ind_2_pcent_threshold)
202
+ check_range(ind_3_pcent_threshold)
203
+ check_range(ind_4_pcent_threshold)
204
+ check_range(ind_5_pcent_threshold)
205
+ check_range(ind_6_pcent_threshold)
206
+ check_range(ind_7_pcent_threshold)
207
+ check_range(ind_8_pcent_threshold)
208
+ check_range(ind_9_pcent_threshold)
209
+ check_range(ind_10_pcent_threshold)
210
+ check_range(ind_11_pcent_threshold)
211
+
212
+ input_cols = [
213
+ ind_1_input_columns,
214
+ ind_2_input_columns,
215
+ ind_3_input_columns,
216
+ ind_4_input_columns,
217
+ ind_5_input_columns,
218
+ ind_6_input_columns,
219
+ ind_7_input_columns,
220
+ ind_8_input_columns,
221
+ ind_9_input_columns,
222
+ ind_10_input_columns,
223
+ ind_11_input_columns,
224
+ ]
225
+ thresholds = [
226
+ ind_1_pcent_threshold,
227
+ ind_2_pcent_threshold,
228
+ ind_3_pcent_threshold,
229
+ ind_4_pcent_threshold,
230
+ ind_5_pcent_threshold,
231
+ ind_6_pcent_threshold,
232
+ ind_7_pcent_threshold,
233
+ ind_8_pcent_threshold,
234
+ ind_9_pcent_threshold,
235
+ ind_10_pcent_threshold,
236
+ ind_11_pcent_threshold,
237
+ ]
238
+ names = [
239
+ ind_1_name,
240
+ ind_2_name,
241
+ ind_3_name,
242
+ ind_4_name,
243
+ ind_5_name,
244
+ ind_6_name,
245
+ ind_7_name,
246
+ ind_8_name,
247
+ ind_9_name,
248
+ ind_10_name,
249
+ ind_11_name,
250
+ ]
251
+ [check_range(threshold) for threshold in thresholds]
252
+
253
+ df_w_indicators = add_indicators(
254
+ df,
255
+ input_cols,
256
+ thresholds,
257
+ names,
258
+ low_name,
259
+ high_name,
260
+ unit_type, # Pass the unit type
261
+ )
262
+
263
+ df_w_indicators_and_risk_pcrop = add_eudr_risk_pcrop_col(
264
+ df=df_w_indicators,
265
+ ind_1_name=ind_1_name,
266
+ ind_2_name=ind_2_name,
267
+ ind_3_name=ind_3_name,
268
+ ind_4_name=ind_4_name,
269
+ )
270
+
271
+ df_w_indicators_and_risk_acrop = add_eudr_risk_acrop_col(
272
+ df=df_w_indicators,
273
+ ind_1_name=ind_1_name,
274
+ ind_2_name=ind_2_name,
275
+ ind_4_name=ind_4_name,
276
+ )
277
+
278
+ df_w_indicators_and_risk_timber = add_eudr_risk_timber_col(
279
+ df=df_w_indicators,
280
+ ind_2_name=ind_2_name,
281
+ ind_5_name=ind_5_name,
282
+ ind_6_name=ind_6_name,
283
+ ind_7_name=ind_7_name,
284
+ ind_8_name=ind_8_name,
285
+ ind_9_name=ind_9_name,
286
+ ind_10_name=ind_10_name,
287
+ ind_11_name=ind_11_name,
288
+ )
289
+
290
+ return df_w_indicators_and_risk_timber
291
+
292
+
293
+ def add_eudr_risk_pcrop_col(
294
+ df: data_lookup_type,
295
+ ind_1_name: str,
296
+ ind_2_name: str,
297
+ ind_3_name: str,
298
+ ind_4_name: str,
299
+ ) -> data_lookup_type:
300
+ """
301
+ Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.
302
+
303
+ Args:
304
+ df (DataFrame): Input DataFrame.
305
+ ind_1_name (str, optional): Name of first indicator column. Defaults to "Ind_01_treecover".
306
+ ind_2_name (str, optional): Name of second indicator column. Defaults to "Ind_02_commodities".
307
+ ind_3_name (str, optional): Name of third indicator column. Defaults to "Ind_03_disturbance_before_2020".
308
+ ind_4_name (str, optional): Name of fourth indicator column. Defaults to "Ind_04_disturbance_after_2020".
309
+
310
+ Returns:
311
+ DataFrame: DataFrame with added 'risk_pcrop' column.
312
+ """
313
+
314
+ for index, row in df.iterrows():
315
+ # If any of the first three indicators suggest low risk, set EUDR_risk to "low"
316
+ if (
317
+ row[ind_1_name] == "no"
318
+ or row[ind_2_name] == "yes"
319
+ or row[ind_3_name] == "yes"
320
+ ):
321
+ df.at[index, "risk_pcrop"] = "low"
322
+ # If none of the first three indicators suggest low risk and Indicator 4 suggests no risk, set EUDR_risk to "more_info_needed"
323
+ elif row[ind_4_name] == "no":
324
+ df.at[index, "risk_pcrop"] = "more_info_needed"
325
+ # If none of the above conditions are met, set EUDR_risk to "high"
326
+ else:
327
+ df.at[index, "risk_pcrop"] = "high"
328
+
329
+ return df
330
+
331
+
332
+ def add_eudr_risk_acrop_col(
333
+ df: data_lookup_type,
334
+ ind_1_name: str,
335
+ ind_2_name: str,
336
+ ind_4_name: str,
337
+ ) -> data_lookup_type:
338
+ """
339
+ Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.
340
+
341
+ Args:
342
+ df (DataFrame): Input DataFrame.
343
+ ind_1_name (str, optional): Name of first indicator column. Defaults to "Ind_01_treecover".
344
+ ind_2_name (str, optional): Name of second indicator column. Defaults to "Ind_02_commodities".
345
+ ind_4_name (str, optional): Name of fourth indicator column. Defaults to "Ind_04_disturbance_after_2020".
346
+
347
+ Returns:
348
+ DataFrame: DataFrame with added 'risk_acrop' column.
349
+ """
350
+
351
+ # soy risk
352
+ for index, row in df.iterrows():
353
+ # If there is no tree cover in 2020, set EUDR_risk_soy to "low"
354
+ if row[ind_1_name] == "no" or row[ind_2_name] == "yes":
355
+ df.at[index, "risk_acrop"] = "low"
356
+ # If there is tree cover in 2020 and disturbances post 2020, set EUDR_risk_soy to "high"
357
+ elif row[ind_1_name] == "yes" and row[ind_4_name] == "yes":
358
+ df.at[index, "risk_acrop"] = "high"
359
+ # If tree cover and no disturbances post 2020, set EUDR_risk to "more_info_needed"
360
+ else:
361
+ df.at[index, "risk_acrop"] = "more_info_needed"
362
+
363
+ return df
364
+
365
+
366
+ def add_eudr_risk_timber_col(
367
+ df: data_lookup_type,
368
+ ind_2_name: str,
369
+ ind_5_name: str,
370
+ ind_6_name: str,
371
+ ind_7_name: str,
372
+ ind_8_name: str,
373
+ ind_9_name: str,
374
+ ind_10_name: str,
375
+ ind_11_name: str,
376
+ ) -> data_lookup_type:
377
+ """
378
+ Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.
379
+
380
+ Args:
381
+ df (DataFrame): Input DataFrame.
382
+ ind_2_name (str, optional): Name of second indicator column. Defaults to "Ind_02_commodities".
383
+ ind_5_name (str, optional): Name of fifth indicator column. Defaults to "Ind_05_primary_2020".
384
+ ind_6_name (str, optional): Name of sixth indicator column. Defaults to "Ind_06_nat_reg_forest_2020".
385
+ ind_7_name (str, optional): Name of seventh indicator column. Defaults to "Ind_07_planted_plantations_2020".
386
+ ind_8_name (str, optional): Name of eighth indicator column. Defaults to "Ind_08_planted_plantations_after_2020".
387
+ ind_9_name (str, optional): Name of ninth indicator column. Defaults to "Ind_09_treecover_after_2020".
388
+ ind_10_name (str, optional): Name of tenth indicator column. Defaults to "Ind_10_agri_after_2020".
389
+ ind_11_name (str, optional): Name of eleventh indicator column. Defaults to "Ind_11_logging_concession_before_2020".
390
+
391
+ Returns:
392
+ DataFrame: DataFrame with added 'risk_timber' column.
393
+ """
394
+
395
+ for index, row in df.iterrows():
396
+ # If there is a commodity in 2020 (ind_2_name)
397
+ # OR if there is planted-plantation in 2020 (ind_7_name) AND no agriculture in 2023 (ind_10_name), set EUDR_risk_timber to "low"
398
+ if row[ind_2_name] == "yes" or (
399
+ row[ind_7_name] == "yes" and row[ind_10_name] == "no"
400
+ ):
401
+ df.at[index, "risk_timber"] = "low"
402
+ # If there is a natural primary forest (ind_5_name) or naturally regenerating forest (ind_6_name) or planted forest (ind_7_name) in 2020 AND agricultural use after 2020 (ind_10_name), set risk_timber to "high"
403
+ elif (
404
+ row[ind_5_name] == "yes"
405
+ or row[ind_6_name] == "yes"
406
+ or row[ind_7_name] == "yes"
407
+ ) and row[ind_10_name] == "yes":
408
+ df.at[index, "risk_timber"] = "high"
409
+ # If there is a natural primary forest (ind_5_name) or naturally regenerating forest (ind_6_name) AND planted after 2020 (ind_8_name), set risk_timber to "high"
410
+ elif (row[ind_5_name] == "yes" or row[ind_6_name] == "yes") and row[
411
+ ind_8_name
412
+ ] == "yes":
413
+ df.at[index, "risk_timber"] = "high"
414
+ # No data yet on OWL conversion
415
+ # If primary or naturally regenerating or planted forest in 2020 and OWL in 2023, set EUDR_risk to high
416
+ # elif (row[ind_5_name] == "yes" or row[ind_6_name] == "yes" or row[ind_7_name] == "yes") and row[ind_10_name] == "yes":
417
+ # df.at[index, 'EUDR_risk_timber'] = "high"
418
+
419
+ # If there is a natural primary forest (ind_5_name) OR a naturally regenerating forest in 2020 (ind_6_name), AND information on management practice at any time (ind_11_name) OR tree cover or regrowth post 2020 (ind_9_name), set risk_timber to "low"
420
+ elif (row[ind_5_name] == "yes" or row[ind_6_name] == "yes") and (
421
+ row[ind_9_name] == "yes" or row[ind_11_name] == "yes"
422
+ ):
423
+ df.at[index, "risk_timber"] = "low"
424
+ # If primary (ind_5_name) OR naturally regenerating in 2020 (ind_6_name) and no other info, set EUDR_risk to "more_info_needed"
425
+ elif row[ind_5_name] == "yes" or row[ind_6_name] == "yes":
426
+ df.at[index, "risk_timber"] = "more_info_needed"
427
+ # If none of the above conditions are met, set EUDR_risk to "low"
428
+ else:
429
+ df.at[index, "risk_timber"] = "low"
430
+
431
+ return df
432
+
433
+
434
+ def add_indicators(
435
+ df: data_lookup_type,
436
+ input_cols: list[str],
437
+ thresholds: list[float],
438
+ names: list[str],
439
+ low_name: str = "no",
440
+ high_name: str = "yes",
441
+ unit_type: str = None,
442
+ ) -> data_lookup_type:
443
+ for input_col, threshold, name in zip(input_cols, thresholds, names):
444
+ df = add_indicator_column(
445
+ df=df,
446
+ input_columns=input_col,
447
+ threshold=threshold,
448
+ new_column_name=name,
449
+ low_name=low_name,
450
+ high_name=high_name,
451
+ sum_comparison=False,
452
+ unit_type=unit_type, # Pass the unit type
453
+ )
454
+ return df
455
+
456
+
457
+ # Update add_indicator_column to use the unit_type parameter
458
+ def add_indicator_column(
459
+ df: data_lookup_type,
460
+ input_columns: list[str],
461
+ threshold: float,
462
+ new_column_name: str,
463
+ low_name: str = "no",
464
+ high_name: str = "yes",
465
+ sum_comparison: bool = False,
466
+ unit_type: str = None, # unit_type parameter
467
+ ) -> data_lookup_type:
468
+ """
469
+ Add a new column to the DataFrame based on the specified columns, threshold, and comparison sign.
470
+
471
+ Parameters:
472
+ df (data_lookup_type): The pandas DataFrame to which the column will be added.
473
+ input_columns (list): List of column names to check for threshold.
474
+ threshold (float): The threshold value to compare against.
475
+ new_column_name (str): The name of the new column to be added.
476
+ The '>' sign is used for comparisons.
477
+ When 'sum_comparison' is True, the threshold is compared to the sum of all columns listed in 'input_columns'; when False, each column in the list is compared to the threshold individually.
478
+ low_name (str): The name for the value when below or equal to threshold (default is 'no').
479
+ high_name (str): The name for the value when above threshold (default is 'yes').
480
+ sum_comparison (bool): If True, sum all values in input_columns and compare to threshold (default is False).
481
+ unit_type (str): Whether values are in "ha" or "percent".
482
+
483
+ Returns:
484
+ data_lookup_type: The DataFrame with the new column added.
485
+ """
486
+ # Create a new column and initialize with low_name
487
+ new_column = pd.Series(low_name, index=df.index, name=new_column_name)
488
+
489
+ # Default behavior: use '>' for single column comparison
490
+ if sum_comparison:
491
+ # Sum all values in specified columns and compare to threshold
492
+ sum_values = df[input_columns].sum(axis=1)
493
+ new_column[sum_values > threshold] = high_name
494
+ else:
495
+ # Check if any values in specified columns are above the threshold and update the new column accordingly
496
+ for col in input_columns:
497
+ # So that the threshold is always in percent: if outputs are in ha, the code converts to percent (by dividing by the geometry_area_column column).
498
+ # Clamping is needed due to differences in decimal places (meaning input values may go just over 100)
499
+ if unit_type == "ha":
500
+ df[geometry_area_column] = pd.to_numeric(
501
+ df[geometry_area_column], errors="coerce"
502
+ )
503
+ val_to_check = clamp(
504
+ ((df[col] / df[geometry_area_column]) * 100), 0, 100
505
+ )
506
+ else:
507
+ val_to_check = df[col]
508
+ new_column[val_to_check > threshold] = high_name
509
+
510
+ # Concatenate the new column to the DataFrame
511
+ df = pd.concat([df, new_column], axis=1)
512
+ return df
513
+
514
+
515
+ def get_cols_ind_01_treecover(lookup_gee_datasets_df):
516
+ """
517
+ Generate a list of dataset names for the treecover theme, excluding those marked for exclusion.
518
+
519
+ Args:
520
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
521
+
522
+ Returns:
523
+ list: List of dataset names set to be used in the risk calculations for the treecover theme, excluding those marked for exclusion.
524
+ """
525
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
526
+ lookup_gee_datasets_df["exclude_from_output"] != 1
527
+ ]
528
+ return list(
529
+ lookup_gee_datasets_df["name"][
530
+ (lookup_gee_datasets_df["use_for_risk"] == 1)
531
+ & (lookup_gee_datasets_df["theme"] == "treecover")
532
+ ]
533
+ )
534
+
535
+
536
+ def get_cols_ind_02_commodities(lookup_gee_datasets_df):
537
+ """
538
+ Generate a list of dataset names for the commodities theme, excluding those marked for exclusion.
539
+
540
+ Args:
541
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
542
+
543
+ Returns:
544
+ list: List of dataset names set to be used in the risk calculations for the commodities theme, excluding those marked for exclusion.
545
+ """
546
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
547
+ lookup_gee_datasets_df["exclude_from_output"] != 1
548
+ ]
549
+ return list(
550
+ lookup_gee_datasets_df["name"][
551
+ (lookup_gee_datasets_df["use_for_risk"] == 1)
552
+ & (lookup_gee_datasets_df["theme"] == "commodities")
553
+ ]
554
+ )
555
+
556
+
557
+ def get_cols_ind_03_dist_before_2020(lookup_gee_datasets_df):
558
+ """
559
+ Generate a list of dataset names for the disturbance before 2020 theme, excluding those marked for exclusion.
560
+
561
+ Args:
562
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
563
+
564
+ Returns:
565
+ list: List of dataset names set to be used in the risk calculations for the disturbance before 2020 theme, excluding those marked for exclusion.
566
+ """
567
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
568
+ lookup_gee_datasets_df["exclude_from_output"] != 1
569
+ ]
570
+ return list(
571
+ lookup_gee_datasets_df["name"][
572
+ (lookup_gee_datasets_df["use_for_risk"] == 1)
573
+ & (lookup_gee_datasets_df["theme"] == "disturbance_before")
574
+ ]
575
+ )
576
+
577
+
578
+ def get_cols_ind_04_dist_after_2020(lookup_gee_datasets_df):
579
+ """
580
+ Generate a list of dataset names for the disturbance after 2020 theme, excluding those marked for exclusion.
581
+
582
+ Args:
583
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
584
+
585
+ Returns:
586
+ list: List of dataset names set to be used in the risk calculations for the disturbance after 2020 theme, excluding those marked for exclusion.
587
+ """
588
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
589
+ lookup_gee_datasets_df["exclude_from_output"] != 1
590
+ ]
591
+ return list(
592
+ lookup_gee_datasets_df["name"][
593
+ (lookup_gee_datasets_df["use_for_risk"] == 1)
594
+ & (lookup_gee_datasets_df["theme"] == "disturbance_after")
595
+ ]
596
+ )
597
+
598
+
599
+ def get_cols_ind_05_primary_2020(lookup_gee_datasets_df):
600
+ """
601
+ Generate a list of dataset names for primary forests in 2020
602
+
603
+ Args:
604
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
605
+
606
+ Returns:
607
+ list: List of dataset names set to be used in the risk calculations for the degradation - primary forest in 2020, excluding those marked for exclusion.
608
+ """
609
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
610
+ lookup_gee_datasets_df["exclude_from_output"] != 1
611
+ ]
612
+ return list(
613
+ lookup_gee_datasets_df["name"][
614
+ (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
615
+ & (lookup_gee_datasets_df["theme_timber"] == "primary")
616
+ ]
617
+ )
618
+
619
+
620
+ def get_cols_ind_06_nat_reg_2020(lookup_gee_datasets_df):
621
+ """
622
+ Generate a list of dataset names for naturally_reg_2020 forests in 2020
623
+
624
+ Args:
625
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
626
+
627
+ Returns:
628
+ list: List of dataset names set to be used in the risk calculations for the degradation - naturally_reg_2020 in 2020, excluding those marked for exclusion.
629
+ """
630
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
631
+ lookup_gee_datasets_df["exclude_from_output"] != 1
632
+ ]
633
+ return list(
634
+ lookup_gee_datasets_df["name"][
635
+ (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
636
+ & (lookup_gee_datasets_df["theme_timber"] == "naturally_reg_2020")
637
+ ]
638
+ )
639
+
640
+
641
+ def get_cols_ind_07_planted_2020(lookup_gee_datasets_df):
642
+ """
643
+ Generate a list of dataset names for planted and plantation forests in 2020
644
+
645
+ Args:
646
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
647
+
648
+ Returns:
649
+ list: List of dataset names set to be used in the risk calculations for the degradation - planted and plantation forests in 2020, excluding those marked for exclusion.
650
+ """
651
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
652
+ lookup_gee_datasets_df["exclude_from_output"] != 1
653
+ ]
654
+ return list(
655
+ lookup_gee_datasets_df["name"][
656
+ (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
657
+ & (lookup_gee_datasets_df["theme_timber"] == "planted_plantation_2020")
658
+ ]
659
+ )
660
+
661
+
662
+ def get_cols_ind_08_planted_after_2020(lookup_gee_datasets_df):
663
+ """
664
+ Generate a list of dataset names for planted and plantation forests post 2020
665
+
666
+ Args:
667
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
668
+
669
+ Returns:
670
+ list: List of dataset names set to be used in the risk calculations for the degradation - planted and plantation forests post 2020, excluding those marked for exclusion.
671
+ """
672
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
673
+ lookup_gee_datasets_df["exclude_from_output"] != 1
674
+ ]
675
+ return list(
676
+ lookup_gee_datasets_df["name"][
677
+ (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
678
+ & (
679
+ lookup_gee_datasets_df["theme_timber"]
680
+ == "planted_plantation_after_2020"
681
+ )
682
+ ]
683
+ )
684
+
685
+
686
+ def get_cols_ind_09_treecover_after_2020(lookup_gee_datasets_df):
687
+ """
688
+ Generate a list of dataset names for treecover post 2020
689
+
690
+ Args:
691
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
692
+
693
+ Returns:
694
+ list: List of dataset names set to be used in the risk calculations for the degradation - treecover post 2020, excluding those marked for exclusion.
695
+ """
696
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
697
+ lookup_gee_datasets_df["exclude_from_output"] != 1
698
+ ]
699
+ return list(
700
+ lookup_gee_datasets_df["name"][
701
+ (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
702
+ & (lookup_gee_datasets_df["theme_timber"] == "treecover_after_2020")
703
+ ]
704
+ )
705
+
706
+
707
+ def get_cols_ind_10_agri_after_2020(lookup_gee_datasets_df):
708
+ """
709
+ Generate a list of dataset names for croplands post 2020
710
+
711
+ Args:
712
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
713
+
714
+ Returns:
715
+ list: List of dataset names set to be used in the risk calculations for the degradation - croplands post 2020, excluding those marked for exclusion.
716
+ """
717
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
718
+ lookup_gee_datasets_df["exclude_from_output"] != 1
719
+ ]
720
+ return list(
721
+ lookup_gee_datasets_df["name"][
722
+ (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
723
+ & (lookup_gee_datasets_df["theme_timber"] == "agri_after_2020")
724
+ ]
725
+ )
726
+
727
+
728
+ def get_cols_ind_11_logging_before_2020(lookup_gee_datasets_df):
729
+ """
730
+ Generate a list of dataset names for logging concessions (2020 if available)
731
+
732
+ Args:
733
+ lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.
734
+
735
+ Returns:
736
+ list: List of dataset names set to be used in the risk calculations for the degradation - logging concessions, excluding those marked for exclusion.
737
+ """
738
+ lookup_gee_datasets_df = lookup_gee_datasets_df[
739
+ lookup_gee_datasets_df["exclude_from_output"] != 1
740
+ ]
741
+ return list(
742
+ lookup_gee_datasets_df["name"][
743
+ (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
744
+ & (lookup_gee_datasets_df["theme_timber"] == "logging_concession")
745
+ ]
746
+ )
747
+
748
+
749
+ def clamp(
750
+ value: float | pd.Series, min_val: float, max_val: float
751
+ ) -> float | pd.Series:
752
+ """
753
+ Clamp a value or a Pandas Series within a specified range.
754
+
755
+ Args:
756
+ value (float | pd.Series): The value or series to be clamped.
757
+ min_val (float): The minimum value of the range.
758
+ max_val (float): The maximum value of the range.
759
+
760
+ Returns:
761
+ float | pd.Series: The clamped value or series within the range.
762
+ """
763
+ if isinstance(value, pd.Series):
764
+ return value.clip(lower=min_val, upper=max_val)
765
+ else:
766
+ return max(min_val, min(value, max_val))
767
+
768
+
769
+ def check_range(value: float) -> None:
770
+ if not (0 <= value <= 100):
771
+ raise ValueError("Value must be between 0 and 100.")
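
Usage sketch (not part of the released diff): a minimal, hedged example of calling the updated whisp_risk function. It relies only on names visible in the file above (whisp_risk, explicit_unit_type, national_codes, and the risk_pcrop / risk_acrop / risk_timber output columns); the input CSV path is hypothetical and stands in for a plot-statistics table produced upstream by Whisp.

```python
# Minimal sketch, assuming a Whisp plot-statistics table already exists on disk.
# "whisp_stats.csv" is a hypothetical path, not a file shipped with the package.
import pandas as pd

from openforis_whisp.risk import whisp_risk

# One row per plot geometry; passing explicit_unit_type skips detection from the
# stats unit-type column (values here are assumed to be percentages per dataset).
df_stats = pd.read_csv("whisp_stats.csv")

df_risk = whisp_risk(
    df_stats,
    explicit_unit_type="percent",  # or "ha"; "ha" values are converted to percent internally
    national_codes=["CI", "GH"],   # optional ISO2 filter for country-specific datasets
)

# whisp_risk appends the indicator columns plus one risk column per commodity group.
print(df_risk[["risk_pcrop", "risk_acrop", "risk_timber"]].head())
```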