openforis-whisp 2.0.0a6__py3-none-any.whl → 2.0.0b1__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- openforis_whisp/__init__.py +74 -75
- openforis_whisp/data_conversion.py +493 -493
- openforis_whisp/datasets.py +1377 -1384
- openforis_whisp/logger.py +75 -75
- openforis_whisp/parameters/__init__.py +15 -15
- openforis_whisp/parameters/config_runtime.py +44 -44
- openforis_whisp/parameters/lookup_context_and_metadata.csv +13 -13
- openforis_whisp/parameters/lookup_gee_datasets.csv +2 -1
- openforis_whisp/pd_schemas.py +77 -77
- openforis_whisp/reformat.py +696 -495
- openforis_whisp/risk.py +848 -771
- openforis_whisp/stats.py +1228 -1134
- openforis_whisp/utils.py +194 -154
- {openforis_whisp-2.0.0a6.dist-info → openforis_whisp-2.0.0b1.dist-info}/LICENSE +21 -21
- {openforis_whisp-2.0.0a6.dist-info → openforis_whisp-2.0.0b1.dist-info}/METADATA +2 -2
- openforis_whisp-2.0.0b1.dist-info/RECORD +17 -0
- {openforis_whisp-2.0.0a6.dist-info → openforis_whisp-2.0.0b1.dist-info}/WHEEL +1 -1
- openforis_whisp-2.0.0a6.dist-info/RECORD +0 -17
openforis_whisp/risk.py
CHANGED
@@ -1,771 +1,848 @@
Removed (previous version, 771 lines): not fully rendered in this export; the removed lines that are visible duplicate the opening of the new version below.
Added (new version, 848 lines):
import pandas as pd

from .pd_schemas import data_lookup_type


from openforis_whisp.parameters.config_runtime import (
    geometry_area_column,
    DEFAULT_GEE_DATASETS_LOOKUP_TABLE_PATH,
    stats_unit_type_column,  # Add this import
)

from openforis_whisp.reformat import filter_lookup_by_country_codes

# could embed this in each function below that uses lookup_gee_datasets_df.
lookup_gee_datasets_df: data_lookup_type = pd.read_csv(
    DEFAULT_GEE_DATASETS_LOOKUP_TABLE_PATH
)


# requires lookup_gee_datasets_df


# Add function to detect unit type from dataframe
def detect_unit_type(df, explicit_unit_type=None):
    """
    Determine the unit type from the dataframe or use the override value.

    Args:
        df (DataFrame): Input DataFrame.
        explicit_unit_type (str, optional): Override unit type ('ha' or 'percent').

    Returns:
        str: The unit type to use for calculations.

    Raises:
        ValueError: If the unit type can't be determined and no override is provided,
            or if there are mixed unit types in the dataframe.
    """
    # If override is provided, use it
    if explicit_unit_type is not None:
        if explicit_unit_type not in ["ha", "percent"]:
            raise ValueError(
                f"Invalid unit type: {explicit_unit_type}. Must be 'ha' or 'percent'."
            )
        return explicit_unit_type

    # Check if unit type column exists in the dataframe
    if stats_unit_type_column not in df.columns:
        raise ValueError(
            f"Column '{stats_unit_type_column}' not found in dataframe. "
            "Please provide 'explicit_unit_type' parameter to specify the unit type."
        )

    # Get unique values from the column
    unit_types = df[stats_unit_type_column].unique()

    # Check for mixed unit types
    if len(unit_types) > 1:
        raise ValueError(
            f"Mixed unit types in dataframe: {unit_types}. All rows must use the same unit type."
        )

    # Get the single unit type
    unit_type = unit_types[0]

    # Validate that the unit type is recognized
    if unit_type not in ["ha", "percent"]:
        raise ValueError(
            f"Unrecognized unit type: {unit_type}. Must be 'ha' or 'percent'."
        )

    return unit_type


# Update whisp_risk to accept and pass the unit_type parameter
def whisp_risk(
    df: data_lookup_type,  # CHECK THIS
    ind_1_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_2_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_3_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_4_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_5_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_6_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_7_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_8_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_9_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_10_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_11_pcent_threshold: float = 10,  # default values (draft decision tree and parameters)
    ind_1_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_2_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_3_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_4_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_5_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_6_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_7_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_8_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_9_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_10_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_11_input_columns: pd.Series = None,  # see lookup_gee_datasets for details
    ind_1_name: str = "Ind_01_treecover",
    ind_2_name: str = "Ind_02_commodities",
    ind_3_name: str = "Ind_03_disturbance_before_2020",
    ind_4_name: str = "Ind_04_disturbance_after_2020",
    ind_5_name: str = "Ind_05_primary_2020",
    ind_6_name: str = "Ind_06_nat_reg_forest_2020",
    ind_7_name: str = "Ind_07_planted_plantations_2020",
    ind_8_name: str = "Ind_08_planted_plantations_after_2020",
    ind_9_name: str = "Ind_09_treecover_after_2020",
    ind_10_name: str = "Ind_10_agri_after_2020",
    ind_11_name: str = "Ind_11_logging_concession_before_2020",
    low_name: str = "no",
    high_name: str = "yes",
    explicit_unit_type: str = None,
    national_codes: list[str] = None,  # List of ISO2 country codes to filter by
    custom_bands_info: dict = None,  # New parameter for custom band risk info
) -> data_lookup_type:
    """
    Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.

    Args:
        df (DataFrame): Input DataFrame.
        ind_1_pcent_threshold (int, optional): Percentage threshold for the first indicator. Defaults to 10.
        ind_2_pcent_threshold (int, optional): Percentage threshold for the second indicator. Defaults to 10.
        ind_3_pcent_threshold (int, optional): Percentage threshold for the third indicator. Defaults to 0.
        ind_4_pcent_threshold (int, optional): Percentage threshold for the fourth indicator. Defaults to 0.
        ind_1_input_columns (list, optional): List of input columns for the first indicator. Defaults to columns for the treecover theme.
        ind_2_input_columns (list, optional): List of input columns for the second indicator. Defaults to columns for the commodities theme.
        ind_3_input_columns (list, optional): List of input columns for the third indicator. Defaults to columns for disturbance before 2020.
        ind_4_input_columns (list, optional): List of input columns for the fourth indicator. Defaults to columns for disturbance after 2020.
        ind_1_name (str, optional): Name of the first indicator column. Defaults to "Indicator_1_treecover".
        ind_2_name (str, optional): Name of the second indicator column. Defaults to "Indicator_2_commodities".
        ind_3_name (str, optional): Name of the third indicator column. Defaults to "Indicator_3_disturbance_before_2020".
        ind_4_name (str, optional): Name of the fourth indicator column. Defaults to "Indicator_4_disturbance_after_2020".
        low_name (str, optional): Value shown in table if less than or equal to the threshold. Defaults to "no".
        high_name (str, optional): Value shown in table if more than the threshold. Defaults to "yes".
        explicit_unit_type (str, optional): Override the autodetected unit type ('ha' or 'percent').
            If not provided, will detect from dataframe 'unit' column.
        custom_bands_info (dict, optional): Custom band risk information. Dict format:
            {
                'band_name': {
                    'theme': 'treecover',  # or 'commodities', 'disturbance_before', 'disturbance_after'
                    'theme_timber': 'primary',  # or 'naturally_reg_2020', 'planted_plantation_2020', etc.
                    'use_for_risk': 1,  # 0 or 1
                    'use_for_risk_timber': 1,  # 0 or 1
                }
            }
            If None, custom bands won't be included in risk calculations.

    Returns:
        data_lookup_type: DataFrame with added risk columns.
    """
    # Determine the unit type
    unit_type = detect_unit_type(df, explicit_unit_type)
    print(f"Using unit type: {unit_type}")

    lookup_df_copy = lookup_gee_datasets_df.copy()

    # Add custom bands to lookup if provided
    if custom_bands_info:
        lookup_df_copy = add_custom_bands_info_to_lookup(
            lookup_df_copy, custom_bands_info, df.columns
        )
        print(f"Including custom bands: {list(custom_bands_info.keys())}")
        # print(f"appended custom bands info to lookup table")
    if national_codes:
        print(f"Filtering by national codes: {national_codes}")
    # Filter by national codes
    filtered_lookup_gee_datasets_df = filter_lookup_by_country_codes(
        lookup_df=lookup_df_copy,
        filter_col="ISO2_code",
        national_codes=national_codes,
    )

    # Get indicator columns (now includes custom bands)
    if ind_1_input_columns is None:
        ind_1_input_columns = get_cols_ind_01_treecover(filtered_lookup_gee_datasets_df)
    if ind_2_input_columns is None:
        ind_2_input_columns = get_cols_ind_02_commodities(
            filtered_lookup_gee_datasets_df
        )
    if ind_3_input_columns is None:
        ind_3_input_columns = get_cols_ind_03_dist_before_2020(
            filtered_lookup_gee_datasets_df
        )
    if ind_4_input_columns is None:
        ind_4_input_columns = get_cols_ind_04_dist_after_2020(
            filtered_lookup_gee_datasets_df
        )
    if ind_5_input_columns is None:
        ind_5_input_columns = get_cols_ind_05_primary_2020(
            filtered_lookup_gee_datasets_df
        )
    if ind_6_input_columns is None:
        ind_6_input_columns = get_cols_ind_06_nat_reg_2020(
            filtered_lookup_gee_datasets_df
        )
    if ind_7_input_columns is None:
        ind_7_input_columns = get_cols_ind_07_planted_2020(
            filtered_lookup_gee_datasets_df
        )
    if ind_8_input_columns is None:
        ind_8_input_columns = get_cols_ind_08_planted_after_2020(
            filtered_lookup_gee_datasets_df
        )
    if ind_9_input_columns is None:
        ind_9_input_columns = get_cols_ind_09_treecover_after_2020(
            filtered_lookup_gee_datasets_df
        )
    if ind_10_input_columns is None:
        ind_10_input_columns = get_cols_ind_10_agri_after_2020(
            filtered_lookup_gee_datasets_df
        )
    if ind_11_input_columns is None:
        ind_11_input_columns = get_cols_ind_11_logging_before_2020(
            filtered_lookup_gee_datasets_df
        )

    # Check range of values
    check_range(ind_1_pcent_threshold)
    check_range(ind_2_pcent_threshold)
    check_range(ind_3_pcent_threshold)
    check_range(ind_4_pcent_threshold)
    check_range(ind_5_pcent_threshold)
    check_range(ind_6_pcent_threshold)
    check_range(ind_7_pcent_threshold)
    check_range(ind_8_pcent_threshold)
    check_range(ind_9_pcent_threshold)
    check_range(ind_10_pcent_threshold)
    check_range(ind_11_pcent_threshold)

    input_cols = [
        ind_1_input_columns,
        ind_2_input_columns,
        ind_3_input_columns,
        ind_4_input_columns,
        ind_5_input_columns,
        ind_6_input_columns,
        ind_7_input_columns,
        ind_8_input_columns,
        ind_9_input_columns,
        ind_10_input_columns,
        ind_11_input_columns,
    ]
    thresholds = [
        ind_1_pcent_threshold,
        ind_2_pcent_threshold,
        ind_3_pcent_threshold,
        ind_4_pcent_threshold,
        ind_5_pcent_threshold,
        ind_6_pcent_threshold,
        ind_7_pcent_threshold,
        ind_8_pcent_threshold,
        ind_9_pcent_threshold,
        ind_10_pcent_threshold,
        ind_11_pcent_threshold,
    ]
    names = [
        ind_1_name,
        ind_2_name,
        ind_3_name,
        ind_4_name,
        ind_5_name,
        ind_6_name,
        ind_7_name,
        ind_8_name,
        ind_9_name,
        ind_10_name,
        ind_11_name,
    ]
    [check_range(threshold) for threshold in thresholds]

    df_w_indicators = add_indicators(
        df,
        input_cols,
        thresholds,
        names,
        low_name,
        high_name,
        unit_type,  # Pass the unit type
    )

    df_w_indicators_and_risk_pcrop = add_eudr_risk_pcrop_col(
        df=df_w_indicators,
        ind_1_name=ind_1_name,
        ind_2_name=ind_2_name,
        ind_3_name=ind_3_name,
        ind_4_name=ind_4_name,
    )

    df_w_indicators_and_risk_acrop = add_eudr_risk_acrop_col(
        df=df_w_indicators,
        ind_1_name=ind_1_name,
        ind_2_name=ind_2_name,
        ind_4_name=ind_4_name,
    )

    df_w_indicators_and_risk_timber = add_eudr_risk_timber_col(
        df=df_w_indicators,
        ind_2_name=ind_2_name,
        ind_5_name=ind_5_name,
        ind_6_name=ind_6_name,
        ind_7_name=ind_7_name,
        ind_8_name=ind_8_name,
        ind_9_name=ind_9_name,
        ind_10_name=ind_10_name,
        ind_11_name=ind_11_name,
    )

    return df_w_indicators_and_risk_timber


def add_eudr_risk_pcrop_col(
    df: data_lookup_type,
    ind_1_name: str,
    ind_2_name: str,
    ind_3_name: str,
    ind_4_name: str,
) -> data_lookup_type:
    """
    Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.

    Args:
        df (DataFrame): Input DataFrame.
        ind_1_name (str, optional): Name of first indicator column. Defaults to "Ind_01_treecover".
        ind_2_name (str, optional): Name of second indicator column. Defaults to "Ind_02_commodities".
        ind_3_name (str, optional): Name of third indicator column. Defaults to "Ind_03_disturbance_before_2020".
        ind_4_name (str, optional): Name of fourth indicator column. Defaults to "Ind_04_disturbance_after_2020".

    Returns:
        DataFrame: DataFrame with added 'EUDR_risk' column.
    """

    for index, row in df.iterrows():
        # If any of the first three indicators suggest low risk, set EUDR_risk to "low"
        if (
            row[ind_1_name] == "no"
            or row[ind_2_name] == "yes"
            or row[ind_3_name] == "yes"
        ):
            df.at[index, "risk_pcrop"] = "low"
        # If none of the first three indicators suggest low risk and Indicator 4 suggests no risk, set EUDR_risk to "more_info_needed"
        elif row[ind_4_name] == "no":
            df.at[index, "risk_pcrop"] = "more_info_needed"
        # If none of the above conditions are met, set EUDR_risk to "high"
        else:
            df.at[index, "risk_pcrop"] = "high"

    return df


def add_eudr_risk_acrop_col(
    df: data_lookup_type,
    ind_1_name: str,
    ind_2_name: str,
    ind_4_name: str,
) -> data_lookup_type:
    """
    Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.

    Args:
        df (DataFrame): Input DataFrame.
        ind_1_name (str, optional): Name of first indicator column. Defaults to "Ind_01_treecover".
        ind_2_name (str, optional): Name of second indicator column. Defaults to "Ind_02_commodities".
        ind_4_name (str, optional): Name of fourth indicator column. Defaults to "Ind_04_disturbance_after_2020".

    Returns:
        DataFrame: DataFrame with added 'EUDR_risk' column.
    """

    # soy risk
    for index, row in df.iterrows():
        # If there is no tree cover in 2020, set EUDR_risk_soy to "low"
        if row[ind_1_name] == "no" or row[ind_2_name] == "yes":
            df.at[index, "risk_acrop"] = "low"
        # If there is tree cover in 2020 and disturbances post 2020, set EUDR_risk_soy to "high"
        elif row[ind_1_name] == "yes" and row[ind_4_name] == "yes":
            df.at[index, "risk_acrop"] = "high"
        # If tree cover and no disturbances post 2020, set EUDR_risk to "more_info_needed"
        else:
            df.at[index, "risk_acrop"] = "more_info_needed"

    return df


def add_eudr_risk_timber_col(
    df: data_lookup_type,
    ind_2_name: str,
    ind_5_name: str,
    ind_6_name: str,
    ind_7_name: str,
    ind_8_name: str,
    ind_9_name: str,
    ind_10_name: str,
    ind_11_name: str,
) -> data_lookup_type:
    """
    Adds the EUDR (European Union Deforestation Risk) column to the DataFrame based on indicator values.

    Args:
        df (DataFrame): Input DataFrame.
        ind_2_name (str, optional): Name of second indicator column. Defaults to "Ind_02_commodities".
        ind_5_name (str, optional): Name of fifth indicator column. Defaults to "Ind_05_primary_2020".
        ind_6_name (str, optional): Name of sixth indicator column. Defaults to "Ind_06_nat_reg_forest_2020".
        ind_7_name (str, optional): Name of seventh indicator column. Defaults to "Ind_07_planted_plantations_2020".
        ind_8_name (str, optional): Name of eighth indicator column. Defaults to "Ind_08_planted_plantations_after_2020".
        ind_9_name (str, optional): Name of ninth indicator column. Defaults to "Ind_09_treecover_after_2020".
        ind_10_name (str, optional): Name of tenth indicator column. Defaults to "Ind_10_agri_after_2020".
        ind_11_name (str, optional): Name of eleventh indicator column. Defaults to "Ind_11_logging_concession_before_2020".

    Returns:
        DataFrame: DataFrame with added 'EUDR_risk' column.
    """

    for index, row in df.iterrows():
        # If there is a commodity in 2020 (ind_2_name)
        # OR if there is planted-plantation in 2020 (ind_7_name) AND no agriculture in 2023 (ind_10_name), set EUDR_risk_timber to "low"
        if row[ind_2_name] == "yes" or (
            row[ind_7_name] == "yes" and row[ind_10_name] == "no"
        ):
            df.at[index, "risk_timber"] = "low"
        # If there is a natural forest primary (ind_5_name) or naturally regenerating (ind_6_name) or planted forest (ind_7_name) in 2020 AND agricultural after 2020 (ind_10_name), set EUDR_timber to high
        elif (
            row[ind_5_name] == "yes"
            or row[ind_6_name] == "yes"
            or row[ind_7_name] == "yes"
        ) and row[ind_10_name] == "yes":
            df.at[index, "risk_timber"] = "high"
        # If there is a natural forest primary (ind_5_name) or naturally regenerating (ind_6_name) AND planted after 2020 (ind_8_name), set EUDR_risk to "high"
        elif (row[ind_5_name] == "yes" or row[ind_6_name] == "yes") and row[
            ind_8_name
        ] == "yes":
            df.at[index, "risk_timber"] = "high"
        # No data yet on OWL conversion
        # If primary or naturally regenerating or planted forest in 2020 and OWL in 2023, set EUDR_risk to high
        # elif (row[ind_5_name] == "yes" or row[ind_6_name] == "yes" or row[ind_7_name] == "yes") and row[ind_10_name] == "yes":
        #     df.at[index, 'EUDR_risk_timber'] = "high"

        # If there is a natural primary forest (ind_5_name) OR naturally regenerating in 2020 (ind_6_name) AND an information on management practice any time (ind_11_name) OR tree cover or regrowth post 2020 (ind_9_name), set EUDR_risk_timber to "low"
        elif (row[ind_5_name] == "yes" or row[ind_6_name] == "yes") and (
            row[ind_9_name] == "yes" or row[ind_11_name] == "yes"
        ):
            df.at[index, "risk_timber"] = "low"
        # If primary (ind_5_name) OR naturally regenerating in 2020 (ind_6_name) and no other info, set EUDR_risk to "more_info_needed"
        elif row[ind_5_name] == "yes" or row[ind_6_name] == "yes":
            df.at[index, "risk_timber"] = "more_info_needed"
        # If none of the above conditions are met, set EUDR_risk to "low"
        else:
            df.at[index, "risk_timber"] = "low"

    return df


def add_indicators(
    df: data_lookup_type,
    input_cols: list[str],
    thresholds: list[float],
    names: list[str],
    low_name: str = "no",
    high_name: str = "yes",
    unit_type: str = None,
) -> data_lookup_type:
    for input_col, threshold, name in zip(input_cols, thresholds, names):
        df = add_indicator_column(
            df=df,
            input_columns=input_col,
            threshold=threshold,
            new_column_name=name,
            low_name=low_name,
            high_name=high_name,
            sum_comparison=False,
            unit_type=unit_type,  # Pass the unit type
        )
    return df


# Update add_indicator_column to use the unit_type parameter
def add_indicator_column(
    df: data_lookup_type,
    input_columns: list[str],
    threshold: float,
    new_column_name: str,
    low_name: str = "no",
    high_name: str = "yes",
    sum_comparison: bool = False,
    unit_type: str = None,  # unit_type parameter
) -> data_lookup_type:
    """
    Add a new column to the DataFrame based on the specified columns, threshold, and comparison sign.

    Parameters:
        df (data_lookup_type): The pandas DataFrame to which the column will be added.
        input_columns (list): List of column names to check for threshold.
        threshold (float): The threshold value to compare against.
        new_column_name (str): The name of the new column to be added.
            The '>' sign is used for comparisons.
            When 'sum_comparison' is True, the threshold is compared to the sum of all the columns listed in 'input_columns'; when False, each column in the list is compared to the threshold individually.
        low_name (str): The name for the value when below or equal to threshold (default is 'no').
        high_name (str): The name for the value when above threshold (default is 'yes').
        sum_comparison (bool): If True, sum all values in input_columns and compare to threshold (default is False).
        unit_type (str): Whether values are in "ha" or "percent".

    Returns:
        data_lookup_type: The DataFrame with the new column added.
    """
    # Create a new column and initialize with low_name
    new_column = pd.Series(low_name, index=df.index, name=new_column_name)

    # Default behavior: use '>' for single column comparison
    if sum_comparison:
        # Sum all values in specified columns and compare to threshold
        sum_values = df[input_columns].sum(axis=1)
        new_column[sum_values > threshold] = high_name
    else:
        # Check if any values in specified columns are above the threshold and update the new column accordingly
        for col in input_columns:
            # So that the threshold is always in percent: if outputs are in ha, the code converts to percent (by dividing by the geometry_area_column).
            # Clamping is needed due to differences in decimal places (meaning input values may go just over 100)
            if unit_type == "ha":
                df[geometry_area_column] = pd.to_numeric(
                    df[geometry_area_column], errors="coerce"
                )
                val_to_check = clamp(
                    ((df[col] / df[geometry_area_column]) * 100), 0, 100
                )
            else:
                val_to_check = df[col]
            new_column[val_to_check > threshold] = high_name

    # Concatenate the new column to the DataFrame
    df = pd.concat([df, new_column], axis=1)
    return df


def get_cols_ind_01_treecover(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for the treecover theme, excluding those marked for exclusion.

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the treecover theme, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk"] == 1)
            & (lookup_gee_datasets_df["theme"] == "treecover")
        ]
    )


def get_cols_ind_02_commodities(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for the commodities theme, excluding those marked for exclusion.

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the commodities theme, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk"] == 1)
            & (lookup_gee_datasets_df["theme"] == "commodities")
        ]
    )


def get_cols_ind_03_dist_before_2020(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for the disturbance before 2020 theme, excluding those marked for exclusion.

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the disturbance before 2020 theme, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk"] == 1)
            & (lookup_gee_datasets_df["theme"] == "disturbance_before")
        ]
    )


def get_cols_ind_04_dist_after_2020(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for the disturbance after 2020 theme, excluding those marked for exclusion.

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the disturbance after 2020 theme, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk"] == 1)
            & (lookup_gee_datasets_df["theme"] == "disturbance_after")
        ]
    )


def get_cols_ind_05_primary_2020(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for primary forests in 2020

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the degradation - primary forest in 2020, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
            & (lookup_gee_datasets_df["theme_timber"] == "primary")
        ]
    )


def get_cols_ind_06_nat_reg_2020(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for naturally_reg_2020 forests in 2020

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the degradation - naturally_reg_2020 in 2020, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
            & (lookup_gee_datasets_df["theme_timber"] == "naturally_reg_2020")
        ]
    )


def get_cols_ind_07_planted_2020(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for planted and plantation forests in 2020

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the degradation - planted and plantation forests in 2020, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
            & (lookup_gee_datasets_df["theme_timber"] == "planted_plantation_2020")
        ]
    )


def get_cols_ind_08_planted_after_2020(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for planted and plantation forests post 2020

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the degradation - planted and plantation forests post 2020, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
            & (
                lookup_gee_datasets_df["theme_timber"]
                == "planted_plantation_after_2020"
            )
        ]
    )


def get_cols_ind_09_treecover_after_2020(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for treecover post 2020

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the degradation - treecover post 2020, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
            & (lookup_gee_datasets_df["theme_timber"] == "treecover_after_2020")
        ]
    )


def get_cols_ind_10_agri_after_2020(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for croplands post 2020

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the degradation - croplands post 2020, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
            & (lookup_gee_datasets_df["theme_timber"] == "agri_after_2020")
        ]
    )


def get_cols_ind_11_logging_before_2020(lookup_gee_datasets_df):
    """
    Generate a list of dataset names for logging concessions (2020 if available)

    Args:
        lookup_gee_datasets_df (pd.DataFrame): DataFrame containing dataset information.

    Returns:
        list: List of dataset names set to be used in the risk calculations for the degradation - logging concessions, excluding those marked for exclusion.
    """
    lookup_gee_datasets_df = lookup_gee_datasets_df[
        lookup_gee_datasets_df["exclude_from_output"] != 1
    ]
    return list(
        lookup_gee_datasets_df["name"][
            (lookup_gee_datasets_df["use_for_risk_timber"] == 1)
            & (lookup_gee_datasets_df["theme_timber"] == "logging_concession")
        ]
    )


def clamp(
    value: float | pd.Series, min_val: float, max_val: float
) -> float | pd.Series:
    """
    Clamp a value or a Pandas Series within a specified range.

    Args:
        value (float | pd.Series): The value or series to be clamped.
        min_val (float): The minimum value of the range.
        max_val (float): The maximum value of the range.

    Returns:
        float | pd.Series: The clamped value or series within the range.
    """
    if isinstance(value, pd.Series):
        return value.clip(lower=min_val, upper=max_val)
    else:
        return max(min_val, min(value, max_val))


def check_range(value: float) -> None:
    if not (0 <= value <= 100):
        raise ValueError("Value must be between 0 and 100.")


def add_custom_bands_info_to_lookup(
    lookup_df: pd.DataFrame, custom_bands_info: dict, df_columns: list
) -> pd.DataFrame:
    """
    Add custom bands to the lookup DataFrame for risk calculations.

    Parameters
    ----------
    lookup_df : pd.DataFrame
        Original lookup DataFrame
    custom_bands_info : dict
        Custom band definitions with risk info
    df_columns : list
        List of columns in the actual data DataFrame

    Returns
    -------
    pd.DataFrame
        Lookup DataFrame with custom bands added
    """
    custom_rows = []

    for band_name, band_info in custom_bands_info.items():
        # Only add bands that actually exist in the DataFrame
        if band_name in df_columns:
            custom_row = {
                "name": band_name,  # Use the band name as provided
                "theme": band_info.get(
                    "theme", pd.NA
                ),  # default to empty if not provided
                "theme_timber": band_info.get(
                    "theme_timber", pd.NA
                ),  # default to empty if not provided
                "use_for_risk": band_info.get(
                    "use_for_risk", 0
                ),  # default to 0 if not provided
                "use_for_risk_timber": band_info.get(
                    "use_for_risk_timber", 0
                ),  # default to 0 if not provided
                "exclude_from_output": 0,  # 0 here is so we don't exclude custom bands
                "ISO2_code": pd.NA,  # Global, i.e., empty string, by default
                # Add other required columns with defaults
                "col_type": "float64",  # default to float64 if not provided
                "is_nullable": 1,
                "is_required": 0,
                "order": 9999,  # Put at end unless specified otherwise
                "corresponding_variable": pd.NA,  # not necessary for custom bands
            }
            custom_rows.append(custom_row)

    if custom_rows:
        custom_df = pd.DataFrame(custom_rows)
        # Combine with original lookup
        lookup_df = pd.concat([lookup_df, custom_df], ignore_index=True)

    return lookup_df
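
For orientation, a minimal usage sketch of the updated whisp_risk call, including the new custom_bands_info parameter. The stats file path, the "my_custom_band" column name and the ISO2 codes below are illustrative assumptions, not part of the package; a real input DataFrame would come from the Whisp stats step and contain the dataset columns named in the lookup table.

# Illustrative sketch only (assumed inputs): run the risk step on an existing
# Whisp stats table, overriding unit detection and registering one custom band.
import pandas as pd
from openforis_whisp.risk import whisp_risk

stats_df = pd.read_csv("whisp_stats.csv")  # hypothetical output of the stats step

risk_df = whisp_risk(
    df=stats_df,
    explicit_unit_type="percent",   # skip autodetection from the unit column
    national_codes=["CI", "GH"],    # ISO2 codes used to filter the dataset lookup
    custom_bands_info={
        "my_custom_band": {         # hypothetical extra column present in stats_df
            "theme": "treecover",
            "theme_timber": "primary",
            "use_for_risk": 1,
            "use_for_risk_timber": 1,
        }
    },
)
print(risk_df[["risk_pcrop", "risk_acrop", "risk_timber"]].head())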