cavapy 1.1.0__py3-none-any.whl → 1.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cavapy might be problematic.
cavapy.py
CHANGED
@@ -1,1177 +1,523 @@
[Old lines 1-523 were removed in this release, but their text did not survive this capture; only stray fragments ("import", "from") remain, apparently the original imports plus the head of a CORDEX domain bounding-box table whose tail survives below. The recoverable part of the removed code follows.]
-            ],
-            "max_lat": [28, 21.7, 26.5, 13.75, 45.75, 65.0, 18.75, 28.5, 42.75, 69.75],
-            "cordex_domain": [
-                "NAM-22",
-                "EUR-22",
-                "SEA-22",
-                "AUS-22",
-                "WAS-22",
-                "EAS-22",
-                "SAM-22",
-                "CAM-22",
-                "AFR-22",
-                "CAS-22",
-            ],
-        }
-    )
-
-    def is_bbox_contained(bbox, domain):
-        """Check if bbox is contained within the domain bounding box."""
-        return (
-            bbox[0] >= domain["min_lon"]
-            and bbox[1] >= domain["min_lat"]
-            and bbox[2] <= domain["max_lon"]
-            and bbox[3] <= domain["max_lat"]
-        )
-
-    user_bbox = [xlim[0], ylim[0], xlim[1], ylim[1]]
-    domain_row = cordex_domains_df[cordex_domains_df["cordex_domain"] == cordex_domain]
-
-    if domain_row.empty:
-        raise ValueError(f"CORDEX domain '{cordex_domain}' is not recognized.")
-
-    domain_bbox = domain_row.iloc[0]
-
-    if not is_bbox_contained(user_bbox, domain_bbox):
-        suggested_domains = cordex_domains_df[
-            cordex_domains_df.apply(
-                lambda row: is_bbox_contained(user_bbox, row), axis=1
-            )
-        ]
-
-        if suggested_domains.empty:
-            raise ValueError(
-                f"The bounding box {user_bbox} is outside of all available CORDEX domains."
-            )
-
-        suggested_domain = suggested_domains.iloc[0]["cordex_domain"]
-
-        raise ValueError(
-            f"Bounding box {user_bbox} is not within '{cordex_domain}'. Suggested domain: '{suggested_domain}'."
-        )
-
-
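The removed helper treats a bounding box as [min_lon, min_lat, max_lon, max_lat] and keeps it only if it fits entirely inside the chosen CORDEX domain, otherwise suggesting the first domain that does contain it. A minimal, self-contained sketch of that check; the domain bounds below are illustrative placeholders, not the real AFR-22 values, which sat in the unrecovered lines:

def is_bbox_contained(bbox, domain):
    # bbox = [min_lon, min_lat, max_lon, max_lat]; domain maps the same keys as the table above
    return (
        bbox[0] >= domain["min_lon"]
        and bbox[1] >= domain["min_lat"]
        and bbox[2] <= domain["max_lon"]
        and bbox[3] <= domain["max_lat"]
    )

fake_domain = {"min_lon": -25.0, "min_lat": -45.0, "max_lon": 60.0, "max_lat": 42.75}  # hypothetical bounds
print(is_bbox_contained([-1.5, 5.5, 2.0, 11.5], fake_domain))    # True: a Togo-sized box fits
print(is_bbox_contained([-1.5, 5.5, 200.0, 11.5], fake_domain))  # False: max_lon falls outside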
-def _leave_one_out_bias_correction(ref, hist, variable, log):
-    """
-    Perform leave-one-out cross-validation for bias correction to avoid overfitting.
-
-    Args:
-        ref: Reference (observational) data
-        hist: Historical model data
-        variable: Variable name for determining correction method
-        log: Logger instance
-
-    Returns:
-        xr.DataArray: Bias-corrected historical data
-    """
-    log.info("Starting leave-one-out cross-validation for bias correction")
-
-    # Get unique years from historical data
-    hist_years = hist.time.dt.year.values
-    unique_years = np.unique(hist_years)
-
-    # Initialize list to store corrected data for each year
-    corrected_years = []
-
-    for leave_out_year in unique_years:
-        log.info(f"Processing leave-out year: {leave_out_year}")
-
-        # Create masks for training (all years except leave_out_year) and testing (only leave_out_year)
-        train_mask = hist.time.dt.year != leave_out_year
-        test_mask = hist.time.dt.year == leave_out_year
-
-        # Get training data (all years except the current one)
-        hist_train = hist.sel(time=train_mask)
-        hist_test = hist.sel(time=test_mask)
-
-        # Get corresponding reference data for training period
-        ref_train_mask = ref.time.dt.year != leave_out_year
-        ref_train = ref.sel(time=ref_train_mask)
-
-        # Train the bias correction model on the training data
-        QM_leave_out = sdba.EmpiricalQuantileMapping.train(
-            ref_train,
-            hist_train,
-            group="time.month",
-            kind="*" if variable in ["pr", "rsds", "sfcWind"] else "+",
-        )
-
-        # Apply bias correction to the left-out year
-        hist_corrected_year = QM_leave_out.adjust(
-            hist_test, extrapolation="constant", interp="linear"
-        )
-
-        # Apply variable-specific constraints
-        if variable == "hurs":
-            hist_corrected_year = hist_corrected_year.where(hist_corrected_year <= 100, 100)
-            hist_corrected_year = hist_corrected_year.where(hist_corrected_year >= 0, 0)
-
-        corrected_years.append(hist_corrected_year)
-
-    # Concatenate all corrected years and sort by time
-    hist_bs = xr.concat(corrected_years, dim="time").sortby("time")
-
-    log.info("Leave-one-out cross-validation bias correction completed")
-    return hist_bs
-
-
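For context, the removed routine implements leave-one-out cross-validation with empirical quantile mapping: for every year in the historical series it trains on all other years and corrects only the held-out year, so a year's correction never sees that year's own data. A compact sketch of the same idea on synthetic data, assuming xclim (which provides the sdba module used above) is installed:

import numpy as np
import xarray as xr
from xclim import sdba

# Synthetic daily reference series and a model series with a constant +2 degC bias.
time = xr.cftime_range("2000-01-01", "2004-12-31", freq="D", calendar="noleap")
rng = np.random.default_rng(0)
ref = xr.DataArray(
    20 + rng.normal(0, 3, time.size),
    dims="time", coords={"time": time}, attrs={"units": "degC"},
)
hist = ref + 2.0
hist.attrs["units"] = "degC"

corrected = []
for year in np.unique(hist.time.dt.year):
    train_ref = ref.sel(time=ref.time.dt.year != year)
    train_hist = hist.sel(time=hist.time.dt.year != year)
    test_hist = hist.sel(time=hist.time.dt.year == year)
    qm = sdba.EmpiricalQuantileMapping.train(
        train_ref, train_hist, group="time.month", kind="+"
    )
    corrected.append(qm.adjust(test_hist, extrapolation="constant", interp="linear"))

hist_loo = xr.concat(corrected, dim="time").sortby("time")
print(float(hist_loo.mean() - ref.mean()))  # residual bias should be close to zero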
-def process_worker(num_threads, **kwargs) -> xr.DataArray:
-    variable = kwargs["variable"]
-    log = logger.getChild(variable)
-    try:
-        with ThreadPoolExecutor(
-            max_workers=num_threads, thread_name_prefix="climate"
-        ) as executor:
-            return _climate_data_for_variable(executor, **kwargs)
-    except Exception as e:
-        log.exception(f"Process worker failed: {e}")
-        raise
-
-
-def _climate_data_for_variable(
-    executor: ThreadPoolExecutor,
-    *,
-    variable: str,
-    bbox: dict[str, tuple[float, float]],
-    cordex_domain: str,
-    rcp: str,
-    gcm: str,
-    rcm: str,
-    years_up_to: int,
-    years_obs: range,
-    obs: bool,
-    bias_correction: bool,
-    historical: bool,
-    remote: bool,
-    dataset: str = "CORDEX-CORE",
-) -> xr.DataArray:
-    log = logger.getChild(variable)
-
-    pd.options.mode.chained_assignment = None
-    inventory_csv_url = (
-        INVENTORY_DATA_REMOTE_URL if remote else INVENTORY_DATA_LOCAL_PATH
-    )
-    data = pd.read_csv(inventory_csv_url)
-    column_to_use = "location" if remote else "hub"
-
-    # Filter data based on whether we need historical data
-    experiments = [rcp]
-    if historical or bias_correction:
-        experiments.append("historical")
-
-    # Determine activity filter based on dataset
-    activity_filter = "FAO" if dataset == "CORDEX-CORE" else "CRDX-ISIMIP-025"
-
-    filtered_data = data[
-        lambda x: (x["activity"].str.contains(activity_filter, na=False))
-        & (x["domain"] == cordex_domain)
-        & (x["model"].str.contains(gcm, na=False))
-        & (x["rcm"].str.contains(rcm, na=False))
-        & (x["experiment"].isin(experiments))
-    ][["experiment", column_to_use]]
-
-    # Fail early if nothing is found
-    _ensure_inventory_not_empty(
-        filtered_data,
-        dataset=dataset,
-        cordex_domain=cordex_domain,
-        gcm=gcm,
-        rcm=rcm,
-        experiments=experiments,
-        activity_filter=activity_filter,
-        log=log,
-    )
-
-    future_obs = None
-    if obs or bias_correction:
-        future_obs = executor.submit(
-            _thread_download_data,
-            url=None,
-            bbox=bbox,
-            variable=variable,
-            obs=True,
-            years_up_to=years_up_to,
-            years_obs=years_obs,
-            remote=remote,
-        )
-
-    if not obs:
-        download_fn = partial(
-            _thread_download_data,
-            bbox=bbox,
-            variable=variable,
-            obs=False,
-            years_obs=years_obs,
-            years_up_to=years_up_to,
-            remote=remote,
-        )
-        downloaded_models = list(
-            executor.map(download_fn, filtered_data[column_to_use])
-        )
-
-        # Add the downloaded models to the DataFrame
-        filtered_data["models"] = downloaded_models
-
-        if historical or bias_correction:
-            hist = filtered_data[filtered_data["experiment"] == "historical"]["models"].iloc[0]
-            proj = filtered_data[filtered_data["experiment"] == rcp]["models"].iloc[0]
-
-            hist = hist.interpolate_na(dim="time", method="linear")
-            proj = proj.interpolate_na(dim="time", method="linear")
-        else:
-            proj = filtered_data["models"].iloc[0]
-            proj = proj.interpolate_na(dim="time", method="linear")
-
-        if bias_correction and historical:
-            # Load observations for bias correction
-            ref = future_obs.result()
-            log.info("Training eqm with leave-one-out cross-validation")
-
-            # Use leave-one-out cross-validation for historical bias correction
-            hist_bs = _leave_one_out_bias_correction(ref, hist, variable, log)
-
-            # For projections, train on all historical data
-            QM_mo = sdba.EmpiricalQuantileMapping.train(
-                ref,
-                hist,
-                group="time.month",
-                kind="*" if variable in ["pr", "rsds", "sfcWind"] else "+",
-            )
-            log.info("Performing bias correction on projections with full historical training")
-            proj_bs = QM_mo.adjust(proj, extrapolation="constant", interp="linear")
-            log.info("Done!")
-            if variable == "hurs":
-                proj_bs = proj_bs.where(proj_bs <= 100, 100)
-                proj_bs = proj_bs.where(proj_bs >= 0, 0)
-            combined = xr.concat([hist_bs, proj_bs], dim="time")
-            return combined
-
-        elif not bias_correction and historical:
-            combined = xr.concat([hist, proj], dim="time")
-            return combined
-
-        elif bias_correction and not historical:
-            ref = future_obs.result()
-            log.info("Training eqm with historical data")
-            QM_mo = sdba.EmpiricalQuantileMapping.train(
-                ref,
-                hist,
-                group="time.month",
-                kind="*" if variable in ["pr", "rsds", "sfcWind"] else "+",
-            )  # multiplicative approach for pr, rsds and wind speed
-            log.info("Performing bias correction with eqm")
-            proj_bs = QM_mo.adjust(proj, extrapolation="constant", interp="linear")
-            log.info("Done!")
-            if variable == "hurs":
-                proj_bs = proj_bs.where(proj_bs <= 100, 100)
-                proj_bs = proj_bs.where(proj_bs >= 0, 0)
-            return proj_bs
-
-        return proj
-
-    else:  # when observations are True
-        downloaded_obs = future_obs.result()
-        log.info("Done!")
-        return downloaded_obs
-
-
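The worker above overlaps I/O by submitting the ERA5 observation download as a future up front, then mapping the CORDEX model downloads over the same thread pool, and only calling .result() on the observations when they are actually needed. A generic sketch of that pattern with the standard library, using placeholder fetch functions rather than the package's own downloaders:

from concurrent.futures import ThreadPoolExecutor
from functools import partial
import time

def fetch(url, bbox=None):
    time.sleep(0.1)  # stand-in for a slow remote read
    return f"data({url}, bbox={bbox})"

urls = ["historical.nc", "rcp26.nc"]
with ThreadPoolExecutor(max_workers=3, thread_name_prefix="climate") as executor:
    obs_future = executor.submit(fetch, "era5.nc", bbox=(0, 5, 2, 12))          # starts immediately
    models = list(executor.map(partial(fetch, bbox=(0, 5, 2, 12)), urls))        # runs concurrently
    obs = obs_future.result()                                                    # block only when needed

print(models, obs)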
-def _thread_download_data(url: str | None, **kwargs):
-    variable = kwargs["variable"]
-    temporal = "observations" if kwargs["obs"] else ("historical" if url and "historical" in url else "projections")
-    log = logger.getChild(f"{variable}-{temporal}")
-    try:
-        return _download_data(url=url, **kwargs)
-    except Exception as e:
-        log.exception(f"Failed to process data from {url}: {e}")
-        raise
-
-
-def _download_data(
-    url: str | None,
-    bbox: dict[str, tuple[float, float]],
-    variable: str,
-    obs: bool,
-    years_obs: range,
-    years_up_to: int,
-    remote: bool,
-) -> xr.DataArray:
-    temporal = "observations" if obs else ("historical" if url and "historical" in url else "projections")
-    log = logger.getChild(f"{variable}-{temporal}")
-
-    if obs:
-        var = VARIABLES_MAP[variable]
-        log.info(f"Establishing connection to ERA5 data for {variable}({var})")
-        if remote:
-            ds_var = xr.open_dataset(ERA5_DATA_REMOTE_URL)[var]
-        else:
-            ds_var = xr.open_dataset(ERA5_DATA_LOCAL_PATH)[var]
-        log.info(f"Connection to ERA5 data for {variable}({var}) has been established")
-
-        # Coordinate normalization and renaming for 'hurs'
-        if var == "hurs":
-            ds_var = ds_var.rename({"lat": "latitude", "lon": "longitude"})
-            ds_cropped = ds_var.sel(
-                longitude=slice(bbox["xlim"][0], bbox["xlim"][1]),
-                latitude=slice(bbox["ylim"][0], bbox["ylim"][1]),
-            )
-        else:
-            ds_var.coords["longitude"] = (ds_var.coords["longitude"] + 180) % 360 - 180
-            ds_var = ds_var.sortby(ds_var.longitude)
-            ds_cropped = ds_var.sel(
-                longitude=slice(bbox["xlim"][0], bbox["xlim"][1]),
-                latitude=slice(bbox["ylim"][1], bbox["ylim"][0]),
-            )
-
-        # Unit conversion
-        if var in ["t2mx", "t2mn", "t2m"]:
-            ds_cropped -= 273.15  # Convert from Kelvin to Celsius
-            ds_cropped.attrs["units"] = "°C"
-        elif var == "tp":
-            ds_cropped *= 1000  # Convert precipitation
-            ds_cropped.attrs["units"] = "mm"
-        elif var == "ssrd":
-            ds_cropped /= 86400  # Convert from J/m^2 to W/m^2
-            ds_cropped.attrs["units"] = "W m-2"
-        elif var == "sfcwind":
-            ds_cropped = ds_cropped * (
-                4.87 / np.log((67.8 * 10) - 5.42)
-            )  # Convert wind speed from 10 m to 2 m
-            ds_cropped.attrs["units"] = "m s-1"
-
-        # Select years
-        years = [x for x in years_obs]
-        time_mask = (ds_cropped["time"].dt.year >= years[0]) & (
-            ds_cropped["time"].dt.year <= years[-1]
-        )
-
-    else:
-        log.info(f"Establishing connection to CORDEX data for {variable}")
-        ds_var = xr.open_dataset(url)[variable]
-
-        # Check if time dimension has a prefix, indicating variable is not available
-        time_dims = [dim for dim in ds_var.dims if dim.startswith('time_')]
-        if time_dims:
-            msg = f"Variable {variable} is not available for this model: {url}"
-            log.exception(msg)
-            raise ValueError(msg)
-
-        log.info(f"Connection to CORDEX data for {variable} has been established")
-        ds_cropped = ds_var.sel(
-            longitude=slice(bbox["xlim"][0], bbox["xlim"][1]),
-            latitude=slice(bbox["ylim"][1], bbox["ylim"][0]),
-        )
-
-        # Unit conversion
-        if variable in ["tas", "tasmax", "tasmin"]:
-            ds_cropped -= 273.15  # Convert from Kelvin to Celsius
-            ds_cropped.attrs["units"] = "°C"
-        elif variable == "pr":
-            ds_cropped *= 86400  # Convert from kg m^-2 s^-1 to mm/day
-            ds_cropped.attrs["units"] = "mm"
-        elif variable == "rsds":
-            ds_cropped.attrs["units"] = "W m-2"
-        elif variable == "sfcWind":
-            ds_cropped = ds_cropped * (
-                4.87 / np.log((67.8 * 10) - 5.42)
-            )  # Convert wind speed from 10 m to 2 m
-            ds_cropped.attrs["units"] = "m s-1"
-
-        # Select years based on rcp
-        if "rcp" in url:
-            years = [x for x in range(2006, years_up_to + 1)]
-        else:
-            years = [x for x in DEFAULT_YEARS_OBS]
-
-        # Add missing dates
-        ds_cropped = ds_cropped.convert_calendar(
-            calendar="gregorian", missing=np.nan, align_on="date"
-        )
-
-        time_mask = (ds_cropped["time"].dt.year >= years[0]) & (
-            ds_cropped["time"].dt.year <= years[-1]
-        )
-
-    # subset years
-    ds_cropped = ds_cropped.sel(time=time_mask)
-
-    assert isinstance(ds_cropped, xr.DataArray)
-
-    if obs:
-        log.info(
-            f"ERA5 data for {variable} has been processed: unit conversion ({ds_cropped.attrs.get('units', 'unknown units')}), time selection ({years[0]}-{years[-1]})"
-        )
-    else:
-        log.info(
-            f"CORDEX data for {variable} has been processed: unit conversion ({ds_cropped.attrs.get('units', 'unknown units')}), calendar transformation (360-day to Gregorian), time selection ({years[0]}-{years[-1]})"
-        )
-
-    return ds_cropped
-
-
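Both download paths scale 10 m wind speed to 2 m using the factor 4.87 / ln(67.8 * z - 5.42), the log-profile relationship used in FAO-56. A quick numeric check of the constant applied above, assuming z = 10 m:

import numpy as np

z = 10.0  # height of the reanalysis/model wind output, in metres
factor = 4.87 / np.log(67.8 * z - 5.42)
print(round(factor, 3))  # ~0.748, i.e. u2 is roughly 75% of u10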
-# =============================================================================
-# PLOTTING FUNCTIONS
-# =============================================================================
-
-def plot_spatial_map(
-    data: xr.DataArray,
-    time_period: Optional[Tuple[int, int]] = None,
-    aggregation: str = "mean",
-    title: Optional[str] = None,
-    cmap: str = "viridis",
-    figsize: Tuple[int, int] = (12, 8),
-    show_countries: bool = True,
-    save_path: Optional[str] = None,
-    **kwargs
-) -> plt.Figure:
-    """
-    Create a spatial map visualization of climate data.
-    """
-    # Subset data by time period if specified
-    plot_data = data.copy()
-    if time_period is not None:
-        start_year, end_year = time_period
-        plot_data = plot_data.sel(
-            time=slice(f"{start_year}-01-01", f"{end_year}-12-31")
-        )
-
-    # Apply temporal aggregation
-    if aggregation == "mean":
-        plot_data = plot_data.mean(dim="time")
-    elif aggregation == "sum":
-        plot_data = plot_data.sum(dim="time")
-    elif aggregation == "min":
-        plot_data = plot_data.min(dim="time")
-    elif aggregation == "max":
-        plot_data = plot_data.max(dim="time")
-    elif aggregation == "std":
-        plot_data = plot_data.std(dim="time")
-    else:
-        raise ValueError(f"Unsupported aggregation method: {aggregation}")
-
-    # Create figure with cartopy
-    fig, ax = plt.subplots(
-        figsize=figsize,
-        subplot_kw={'projection': ccrs.PlateCarree()}
-    )
-
-    # Plot data
-    im = plot_data.plot(
-        ax=ax,
-        cmap=cmap,
-        transform=ccrs.PlateCarree(),
-        add_colorbar=False,
-        **kwargs
-    )
-
-    # Add map features
-    ax.add_feature(cfeature.COASTLINE, linewidth=0.5)
-    if show_countries:
-        ax.add_feature(cfeature.BORDERS, linewidth=0.3, alpha=0.7)
-    ax.add_feature(cfeature.OCEAN, color='lightblue', alpha=0.3)
-    ax.add_feature(cfeature.LAND, color='lightgray', alpha=0.3)
-
-    # Set extent to data bounds with small buffer
-    lon_min, lon_max = plot_data.longitude.min().item(), plot_data.longitude.max().item()
-    lat_min, lat_max = plot_data.latitude.min().item(), plot_data.latitude.max().item()
-    buffer = 0.5
-    ax.set_extent([lon_min - buffer, lon_max + buffer,
-                   lat_min - buffer, lat_max + buffer], ccrs.PlateCarree())
-
-    # Add gridlines with labels only on left and bottom
-    gl = ax.gridlines(draw_labels=True, alpha=0.3)
-    gl.top_labels = False
-    gl.right_labels = False
-    gl.left_labels = True
-    gl.bottom_labels = True
-
-    # Add colorbar
-    cbar = plt.colorbar(im, ax=ax, shrink=0.8, pad=0.02)
-    if hasattr(plot_data, 'units'):
-        cbar.set_label(f"{plot_data.name} ({plot_data.units})", rotation=270, labelpad=20)
-    else:
-        cbar.set_label(f"{plot_data.name}", rotation=270, labelpad=20)
-
-    # Set title
-    if title is None:
-        var_name = plot_data.name or "Climate Variable"
-        if time_period:
-            title = f"{aggregation.title()} {var_name} ({time_period[0]}-{time_period[1]})"
-        else:
-            title = f"{aggregation.title()} {var_name}"
-
-    ax.set_title(title, fontsize=14, pad=20)
-
-    plt.tight_layout()
-
-    if save_path:
-        plt.savefig(save_path, dpi=300, bbox_inches='tight')
-
-    return fig
-
-
-def plot_time_series(
-    data: Union[xr.DataArray, List[xr.DataArray]],
-    aggregation: str = "mean",
-    labels: Optional[List[str]] = None,
-    title: Optional[str] = None,
-    ylabel: Optional[str] = None,
-    figsize: Tuple[int, int] = (12, 6),
-    trend_line: bool = False,
-    save_path: Optional[str] = None,
-    **kwargs
-) -> plt.Figure:
-    """
-    Create time series plots of climate data.
-    """
-    # Ensure data is a list
-    if isinstance(data, xr.DataArray):
-        data_list = [data]
-        labels = labels or [data.name or "Data"]
-    else:
-        data_list = data
-        labels = labels or [f"Dataset {i+1}" for i in range(len(data_list))]
-
-    if len(data_list) != len(labels):
-        raise ValueError("Number of labels must match number of datasets")
-
-    # Set up the plot
-    fig, ax1 = plt.subplots(figsize=figsize)
-
-    # Process and plot each dataset
-    for i, (dataset, label) in enumerate(zip(data_list, labels)):
-        # Apply spatial aggregation
-        if aggregation == "mean":
-            ts_data = dataset.mean(dim=["latitude", "longitude"])
-        elif aggregation == "sum":
-            ts_data = dataset.sum(dim=["latitude", "longitude"])
-        elif aggregation == "min":
-            ts_data = dataset.min(dim=["latitude", "longitude"])
-        elif aggregation == "max":
-            ts_data = dataset.max(dim=["latitude", "longitude"])
-        elif aggregation == "std":
-            ts_data = dataset.std(dim=["latitude", "longitude"])
-        else:
-            raise ValueError(f"Unsupported aggregation method: {aggregation}")
-
-        # Convert to annual means for cleaner plotting
-        annual_data = ts_data.groupby("time.year").mean()
-
-        # Plot the time series
-        ax1.plot(annual_data.year, annual_data.values, label=label, linewidth=2, **kwargs)
-
-        # Add trend line if requested
-        if trend_line:
-            z = np.polyfit(annual_data.year, annual_data.values, 1)
-            p = np.poly1d(z)
-            ax1.plot(annual_data.year, p(annual_data.year),
-                     linestyle='--', alpha=0.7,
-                     color=ax1.lines[-1].get_color())
-
-    # Format main plot
-    ax1.set_xlabel("Year", fontsize=12)
-    if ylabel is None:
-        if hasattr(data_list[0], 'units'):
-            ylabel = f"{data_list[0].name} ({data_list[0].units})"
-        else:
-            ylabel = data_list[0].name or "Value"
-    ax1.set_ylabel(ylabel, fontsize=12)
-
-    if len(data_list) > 1:
-        ax1.legend()
-
-    ax1.grid(True, alpha=0.3)
-
-    # Set main title
-    if title is None:
-        var_name = data_list[0].name or "Climate Variable"
-        title = f"{aggregation.title()} {var_name} Time Series"
-
-    ax1.set_title(title, fontsize=14, pad=20)
-
-    plt.tight_layout()
-
-    if save_path:
-        plt.savefig(save_path, dpi=300, bbox_inches='tight')
-
-    return fig
-
-
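These plotting helpers are not dropped in 1.1.5; they move to cava_plot and are re-exported by the new module below. A minimal usage sketch based on the signatures above, assuming obs_data came from get_climate_data(..., obs=True) and contains a "pr" entry:

import matplotlib.pyplot as plt

# Spatial mean of precipitation over a chosen window, saved to disk.
fig = plot_spatial_map(obs_data["pr"], time_period=(1990, 2000),
                       aggregation="mean", cmap="Blues", save_path="pr_map.png")

# Area-averaged annual time series with a linear trend line.
fig = plot_time_series([obs_data["pr"]], labels=["ERA5 pr"], trend_line=True)
plt.show()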
-if __name__ == "__main__":
-    # Example 1: Get observational data
-    print("Getting observational data...")
-    obs_data = get_climate_data(
-        country="Togo",
-        obs=True,
-        years_obs=range(1990, 2011),
-        variables=["pr", "tasmax"]
-    )
-    print("Observational data keys:", list(obs_data.keys()))
-
-    # Example 2: Get CORDEX bc projection data and bc historical data
-    print("\nGetting CORDEX projection data...")
-    proj_data = get_climate_data(
-        country="Togo",
-        variables=["tasmax", "tasmin"],
-        cordex_domain="AFR-22",
-        rcp="rcp26",
-        gcm="MPI",
-        rcm="Reg",
-        years_up_to=2010,
-        historical=True,
-        bias_correction=True
-    )
-    print("Projection data keys:", list(proj_data.keys()))
-
-    # Example 3: Get CORDEX-CORE-BC (ISIMIP bias-corrected) data
-    print("\nGetting CORDEX-CORE-BC (ISIMIP bias-corrected) data...")
-    proj_data_bc = get_climate_data(
-        country="Togo",
-        variables=["pr", "tasmax"],
-        cordex_domain="AFR-22",
-        rcp="rcp85",
-        gcm="MPI",
-        rcm="Reg",
-        years_up_to=2030,
-        historical=True,
-        bias_correction=False,  # Must be False when using CORDEX-CORE-BC
-        dataset="CORDEX-CORE-BC"
-    )
-    print("CORDEX-CORE-BC data keys:", list(proj_data_bc.keys()))
-
-    # Example 4: Test new country lookup functionality
-    print("\nTesting country lookup functionality...")
-    try:
-        # Test cartopy-based country lookup
-        bounds = _get_country_bounds("Togo")
-        print(f"Country lookup successful - Togo bounds: {bounds}")
-    except Exception as e:
-        print(f"Country lookup failed: {e}")
-
-    # Example 5: Plotting demonstrations (commented out to avoid blocking)
-    print("\nPlotting functionality is available!")
-    print("Use plot_spatial_map() and plot_time_series() functions")
-
-    print("Example completed successfully!")
+"""Public API for retrieving and visualizing CAVA climate data."""
+
+import multiprocessing as mp
+import xarray as xr
+
+from cava_config import (
+    DEFAULT_YEARS_OBS,
+    VALID_DATASETS,
+    VALID_DOMAINS,
+    VALID_GCM,
+    VALID_RCM,
+    VALID_RCPS,
+    VALID_VARIABLES,
+    logger,
+)
+from cava_download import process_worker
+from cava_plot import plot_spatial_map, plot_time_series
+from cava_validation import (
+    _geo_localize,
+    _get_country_bounds,
+    _validate_gcm_rcm_combinations,
+    _validate_urls,
+)
+
+
+def _get_climate_data_single(
+    *,
+    country: str | None,
+    years_obs: range | None = None,
+    obs: bool = False,
+    cordex_domain: str | None = None,
+    rcp: str | None = None,
+    gcm: str | None = None,
+    rcm: str | None = None,
+    years_up_to: int | None = None,
+    bias_correction: bool = False,
+    historical: bool = False,
+    buffer: int = 0,
+    xlim: tuple[float, float] | None = None,
+    ylim: tuple[float, float] | None = None,
+    remote: bool = True,
+    variables: list[str] | None = None,
+    num_processes: int = len(VALID_VARIABLES),
+    max_threads_per_process: int = 3,
+    dataset: str = "CORDEX-CORE",
+) -> dict[str, xr.DataArray]:
+    """Internal single-combination fetch (one rcp/gcm/rcm), preserves legacy behavior."""
+
+    # Validation for basic parameters
+    if xlim is None and ylim is not None or xlim is not None and ylim is None:
+        raise ValueError(
+            "xlim and ylim mismatch: they must be both specified or both unspecified"
+        )
+    if country is None and xlim is None:
+        raise ValueError("You must specify a country or (xlim, ylim)")
+    if country is not None and xlim is not None:
+        raise ValueError("You must specify either country or (xlim, ylim), not both")
+
+    # Conditional validation based on obs flag
+    if obs:
+        # When obs=True, only years_obs is required
+        if years_obs is None:
+            raise ValueError("years_obs must be provided when obs is True")
+        if not (1980 <= min(years_obs) <= max(years_obs) <= 2020):
+            raise ValueError("Years in years_obs must be within the range 1980 to 2020")
+
+        # Set default values for CORDEX parameters (not used but needed for function calls)
+        cordex_domain = cordex_domain or "AFR-22"  # dummy value
+        rcp = rcp or "rcp26"  # dummy value
+        gcm = gcm or "MPI"  # dummy value
+        rcm = rcm or "Reg"  # dummy value
+        years_up_to = years_up_to or 2030  # dummy value
+    else:
+        # When obs=False, CORDEX parameters are required
+        required_params = {
+            "cordex_domain": VALID_DOMAINS,
+            "rcp": VALID_RCPS,
+            "gcm": VALID_GCM,
+            "rcm": VALID_RCM,
+        }
+        for param_name, valid_values in required_params.items():
+            param_value = locals()[param_name]
+            if param_value is None:
+                raise ValueError(f"{param_name} is required when obs is False")
+            if param_value not in valid_values:
+                raise ValueError(
+                    f"Invalid {param_name}={param_value}. Must be one of {valid_values}"
+                )
+
+        if years_up_to is None:
+            raise ValueError("years_up_to is required when obs is False")
+        if years_up_to <= 2006:
+            raise ValueError("years_up_to must be greater than 2006")
+
+    # Set default years_obs when not processing observations
+    if years_obs is None:
+        years_obs = DEFAULT_YEARS_OBS
+
+    # Validate dataset parameter
+    if dataset not in VALID_DATASETS:
+        raise ValueError(
+            f"Invalid dataset='{dataset}'. Must be one of {VALID_DATASETS}"
+        )
+
+    # Check for incompatible dataset and bias_correction combination
+    if dataset == "CORDEX-CORE-BC" and bias_correction:
+        raise ValueError(
+            "Cannot apply bias_correction=True when using dataset='CORDEX-CORE-BC'. "
+            "The CORDEX-CORE-BC dataset is already bias-corrected using ISIMIP methodology."
+        )
+
+    # Validate variables if provided
+    if variables is not None:
+        invalid_vars = [var for var in variables if var not in VALID_VARIABLES]
+        if invalid_vars:
+            raise ValueError(
+                f"Invalid variables: {invalid_vars}. Must be a subset of {VALID_VARIABLES}"
+            )
+    else:
+        variables = VALID_VARIABLES
+
+    # Validate GCM-RCM combinations for specific domains (only for non-observational data)
+    if not obs:
+        _validate_gcm_rcm_combinations(cordex_domain, gcm, rcm)
+
+    _validate_urls(
+        gcm,
+        rcm,
+        rcp,
+        remote,
+        cordex_domain,
+        obs,
+        historical,
+        bias_correction,
+        dataset,
+        variables,
+    )
+
+    bbox = _geo_localize(country, xlim, ylim, buffer, cordex_domain, obs)
+
+    if num_processes <= 1 or len(variables) <= 1:
+        results = {}
+        for variable in variables:
+            try:
+                results[variable] = process_worker(
+                    max_threads_per_process,
+                    variable=variable,
+                    bbox=bbox,
+                    cordex_domain=cordex_domain,
+                    rcp=rcp,
+                    gcm=gcm,
+                    rcm=rcm,
+                    years_up_to=years_up_to,
+                    years_obs=years_obs,
+                    obs=obs,
+                    bias_correction=bias_correction,
+                    historical=historical,
+                    remote=remote,
+                    dataset=dataset,
+                )
+            except Exception as exc:
+                raise RuntimeError(
+                    f"Variable '{variable}' failed for {gcm}-{rcm} {rcp}"
+                ) from exc
+        return results
+
+    with mp.Pool(processes=min(num_processes, len(variables))) as pool:
+        futures = []
+        for variable in variables:
+            futures.append(
+                pool.apply_async(
+                    process_worker,
+                    args=(max_threads_per_process,),
+                    kwds={
+                        "variable": variable,
+                        "bbox": bbox,
+                        "cordex_domain": cordex_domain,
+                        "rcp": rcp,
+                        "gcm": gcm,
+                        "rcm": rcm,
+                        "years_up_to": years_up_to,
+                        "years_obs": years_obs,
+                        "obs": obs,
+                        "bias_correction": bias_correction,
+                        "historical": historical,
+                        "remote": remote,
+                        "dataset": dataset,
+                    },
+                )
+            )
+
+        try:
+            results = {
+                variable: futures[i].get() for i, variable in enumerate(variables)
+            }
+        except Exception as exc:
+            pool.terminate()
+            pool.join()
+            raise RuntimeError(
+                f"Variable processing failed for {gcm}-{rcm} {rcp}"
+            ) from exc
+
+        pool.close()  # Prevent any more tasks from being submitted to the pool
+        pool.join()  # Wait for all worker processes to finish
+
+    return results
+
+
+def _normalize_selection(
+    value: str | list[str] | None,
+    valid_values: list[str],
+    name: str,
+) -> tuple[list[str], bool]:
+    if value is None:
+        return list(valid_values), True
+    if isinstance(value, str):
+        values = [value]
+    else:
+        values = list(value)
+    if not values:
+        raise ValueError(f"{name} list cannot be empty")
+    invalid = [v for v in values if v not in valid_values]
+    if invalid:
+        raise ValueError(f"Invalid {name} values: {invalid}. Must be within {valid_values}")
+    return values, False
+
+
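The new _normalize_selection helper is what lets rcp/gcm/rcm accept a single string, a list, or None meaning "use everything", returning the expanded list plus a flag saying whether the caller asked for the full set. A quick illustration with a made-up valid list; the real VALID_RCPS lives in cava_config:

values, used_all = _normalize_selection(None, ["rcp26", "rcp85"], "rcp")
print(values, used_all)    # ['rcp26', 'rcp85'] True
values, used_all = _normalize_selection("rcp26", ["rcp26", "rcp85"], "rcp")
print(values, used_all)    # ['rcp26'] False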
+def _run_combo_variable_task(
+    rcp_val: str,
+    gcm_val: str,
+    rcm_val: str,
+    variable: str,
+    common_kwargs: dict,
+    max_threads_per_process: int,
+    bbox: dict,
+):
+    data = process_worker(
+        max_threads_per_process,
+        variable=variable,
+        bbox=bbox,
+        rcp=rcp_val,
+        gcm=gcm_val,
+        rcm=rcm_val,
+        **common_kwargs,
+    )
+    return rcp_val, gcm_val, rcm_val, variable, data
+
+
+def get_climate_data(
+    *,
+    country: str | None,
+    years_obs: range | None = None,
+    obs: bool = False,
+    cordex_domain: str | None = None,
+    rcp: str | list[str] | None = None,
+    gcm: str | list[str] | None = None,
+    rcm: str | list[str] | None = None,
+    years_up_to: int | None = None,
+    bias_correction: bool = False,
+    historical: bool = False,
+    buffer: int = 0,
+    xlim: tuple[float, float] | None = None,
+    ylim: tuple[float, float] | None = None,
+    remote: bool = True,
+    variables: list[str] | None = None,
+    num_processes: int = len(VALID_VARIABLES),
+    max_threads_per_process: int = 3,
+    dataset: str = "CORDEX-CORE",
+    max_total_processes: int = 12,
+) -> dict:
+    """
+    Retrieve CORDEX-CORE projections and/or ERA5 observations for a region.
+
+    The function orchestrates validation, spatial subsetting, unit conversion,
+    optional bias correction, and parallel download/processing.
+    Parallelization uses processes across variables or model/variable combinations,
+    with a thread pool inside each process for per-variable downloads.
+
+    Args:
+        country (str): Name of the country for which data is to be processed.
+            Use None if specifying a region using xlim and ylim.
+        years_obs (range): Range of years for observational data (ERA5 only). Required when obs is True. (default: None).
+        obs (bool): Flag to indicate if processing observational data (default: False).
+            When True, only years_obs is required. CORDEX parameters are optional.
+        cordex_domain (str): CORDEX domain of the climate data. One of {VALID_DOMAINS}.
+            Required when obs is False. (default: None).
+        rcp (str | list[str] | None): Representative Concentration Pathway(s). One of {VALID_RCPS}.
+            If None, all RCPs are used. Required when obs is False. (default: None).
+        gcm (str | list[str] | None): GCM name(s). One of {VALID_GCM}.
+            If None, all GCMs are used. Required when obs is False. (default: None).
+        rcm (str | list[str] | None): RCM name(s). One of {VALID_RCM}.
+            If None, all RCMs are used. Required when obs is False. (default: None).
+        years_up_to (int): The ending year for the projected data. Projections start in 2006 and end in 2100.
+            Hence, if years_up_to is set to 2030, data will be downloaded for the 2006-2030 period.
+            Required when obs is False. (default: None).
+        bias_correction (bool): Whether to apply bias correction (default: False).
+        historical (bool): Flag to indicate if processing historical data (default: False).
+            If True, historical data is provided together with projections.
+            Historical simulation runs for the CORDEX-CORE initiative are provided for the 1980-2005 time period.
+        buffer (int): Buffer distance to expand the region of interest (default: 0).
+        xlim (tuple or None): Longitudinal bounds of the region of interest. Use only when country is None (default: None).
+        ylim (tuple or None): Latitudinal bounds of the region of interest. Use only when country is None (default: None).
+        remote (bool): Flag to work with remote data or not (default: True).
+        variables (list[str] or None): List of variables to process. Must be a subset of {VALID_VARIABLES}. If None, all variables are processed. (default: None).
+        num_processes (int): Number of processes to use, one per variable for a single combo.
+            If num_processes <= 1 or only one variable is requested, variables run sequentially.
+            Defaults to the number of all possible variables. (default: {len(VALID_VARIABLES)}).
+        max_threads_per_process (int): Max number of threads within each process. (default: 3).
+        dataset (str): Dataset source to use. Options are "CORDEX-CORE" (original data) or "CORDEX-CORE-BC" (ISIMIP bias-corrected data). (default: "CORDEX-CORE").
+        max_total_processes (int): Max number of processes when multiple models/RCPs are requested.
+            Defaults to 12 (the cap applies to the total number of combo-variable tasks).
+
+    Returns:
+        dict: If a single (gcm, rcm, rcp) is requested, returns {variable: DataArray}.
+            If multiple are requested, returns {rcp: {"{gcm}-{rcm}": {variable: DataArray}}}.
+    """
+
+    if obs and any(isinstance(v, list) for v in (rcp, gcm, rcm) if v is not None):
+        raise ValueError("rcp/gcm/rcm lists are not supported when obs=True")
+
+    if obs:
+        return _get_climate_data_single(
+            country=country,
+            years_obs=years_obs,
+            obs=obs,
+            cordex_domain=cordex_domain,
+            rcp=rcp if isinstance(rcp, str) or rcp is None else rcp[0],
+            gcm=gcm if isinstance(gcm, str) or gcm is None else gcm[0],
+            rcm=rcm if isinstance(rcm, str) or rcm is None else rcm[0],
+            years_up_to=years_up_to,
+            bias_correction=bias_correction,
+            historical=historical,
+            buffer=buffer,
+            xlim=xlim,
+            ylim=ylim,
+            remote=remote,
+            variables=variables,
+            num_processes=num_processes,
+            max_threads_per_process=max_threads_per_process,
+            dataset=dataset,
+        )
+
+    if cordex_domain is None:
+        raise ValueError("cordex_domain is required when obs is False")
+
+    if xlim is None and ylim is not None or xlim is not None and ylim is None:
+        raise ValueError(
+            "xlim and ylim mismatch: they must be both specified or both unspecified"
+        )
+    if country is None and xlim is None:
+        raise ValueError("You must specify a country or (xlim, ylim)")
+    if country is not None and xlim is not None:
+        raise ValueError("You must specify either country or (xlim, ylim), not both")
+
+    if dataset not in VALID_DATASETS:
+        raise ValueError(
+            f"Invalid dataset='{dataset}'. Must be one of {VALID_DATASETS}"
+        )
+    if dataset == "CORDEX-CORE-BC" and bias_correction:
+        raise ValueError(
+            "Cannot apply bias_correction=True when using dataset='CORDEX-CORE-BC'. "
+            "The CORDEX-CORE-BC dataset is already bias-corrected using ISIMIP methodology."
+        )
+
+    if years_up_to is None:
+        raise ValueError("years_up_to is required when obs is False")
+    if years_up_to <= 2006:
+        raise ValueError("years_up_to must be greater than 2006")
+
+    if years_obs is None:
+        years_obs = DEFAULT_YEARS_OBS
+
+    rcps, _all_rcps = _normalize_selection(rcp, VALID_RCPS, "rcp")
+    gcms, all_gcms = _normalize_selection(gcm, VALID_GCM, "gcm")
+    rcms, all_rcms = _normalize_selection(rcm, VALID_RCM, "rcm")
+
+    combos = [(r, g, m) for r in rcps for g in gcms for m in rcms]
+    if len(combos) == 1:
+        rcp_single, gcm_single, rcm_single = combos[0]
+        return _get_climate_data_single(
+            country=country,
+            years_obs=years_obs,
+            obs=obs,
+            cordex_domain=cordex_domain,
+            rcp=rcp_single,
+            gcm=gcm_single,
+            rcm=rcm_single,
+            years_up_to=years_up_to,
+            bias_correction=bias_correction,
+            historical=historical,
+            buffer=buffer,
+            xlim=xlim,
+            ylim=ylim,
+            remote=remote,
+            variables=variables,
+            num_processes=num_processes,
+            max_threads_per_process=max_threads_per_process,
+            dataset=dataset,
+        )
+
+    valid_combos: list[tuple[str, str, str]] = []
+    invalid_combos: list[tuple[str, str, str]] = []
+    for rcp_val, gcm_val, rcm_val in combos:
+        try:
+            _validate_gcm_rcm_combinations(cordex_domain, gcm_val, rcm_val)
+            valid_combos.append((rcp_val, gcm_val, rcm_val))
+        except ValueError:
+            invalid_combos.append((rcp_val, gcm_val, rcm_val))
+
+    if invalid_combos and not (all_gcms or all_rcms):
+        raise ValueError(
+            "Some requested GCM/RCM combinations are invalid for this domain: "
+            + ", ".join(f"{g}-{m} ({r})" for r, g, m in invalid_combos)
+        )
+    if invalid_combos and (all_gcms or all_rcms):
+        logger.warning(
+            "Skipping invalid GCM/RCM combinations for %s: %s",
+            cordex_domain,
+            ", ".join(f"{g}-{m} ({r})" for r, g, m in invalid_combos),
+        )
+
+    if variables is not None:
+        invalid_vars = [var for var in variables if var not in VALID_VARIABLES]
+        if invalid_vars:
+            raise ValueError(
+                f"Invalid variables: {invalid_vars}. Must be a subset of {VALID_VARIABLES}"
+            )
+        variables_list = list(variables)
+    else:
+        variables_list = list(VALID_VARIABLES)
+
+    results: dict[str, dict[str, dict[str, xr.DataArray]]] = {}
+
+    max_workers = max_total_processes
+    max_workers = max(1, min(max_workers, len(valid_combos) * len(variables_list)))
+
+    common_kwargs = {
+        "years_obs": years_obs,
+        "obs": obs,
+        "cordex_domain": cordex_domain,
+        "years_up_to": years_up_to,
+        "bias_correction": bias_correction,
+        "historical": historical,
+        "remote": remote,
+        "dataset": dataset,
+    }
+
+    bbox = _geo_localize(country, xlim, ylim, buffer, cordex_domain, obs)
+
+    for rcp_val, gcm_val, rcm_val in valid_combos:
+        _validate_urls(
+            gcm_val,
+            rcm_val,
+            rcp_val,
+            remote,
+            cordex_domain,
+            obs,
+            historical,
+            bias_correction,
+            dataset,
+            variables_list,
+        )
+
+    tasks = [
+        (rcp_val, gcm_val, rcm_val, variable, common_kwargs, max_threads_per_process, bbox)
+        for rcp_val, gcm_val, rcm_val in valid_combos
+        for variable in variables_list
+    ]
+
+    with mp.Pool(processes=max_workers) as pool:
+        try:
+            for rcp_val, gcm_val, rcm_val, variable, data in pool.starmap(
+                _run_combo_variable_task, tasks
+            ):
+                results.setdefault(rcp_val, {}).setdefault(f"{gcm_val}-{rcm_val}", {})[
+                    variable
+                ] = data
+        except Exception as exc:
+            pool.terminate()
+            pool.join()
+            raise RuntimeError(
+                "Model/RCP processing failed. Enable DEBUG logs for details."
+            ) from exc
+
+    return results
+
+
+if __name__ == "__main__":
+    # Examples: show how get_climate_data parallelizes.
+    cordex_domain = "AFR-22"
+    years_up_to = 2015
+
+    print("\nExample 1: multiple models (combo-variable tasks parallelized)...")
+    multi = get_climate_data(
+        country="Togo",
+        cordex_domain=cordex_domain,
+        rcp="rcp26",
+        gcm=VALID_GCM,
+        rcm=VALID_RCM,
+        years_up_to=years_up_to,
+        historical=True,
+        bias_correction=False,
+        dataset="CORDEX-CORE",
+    )
+    # Show a compact summary of the structure returned
+    for rcp_val, model_map in multi.items():
+        print(rcp_val, "models:", list(model_map.keys()))
+
+    print("\nExample 2: single model/RCP (variables parallelized)...")
+    single = get_climate_data(
+        country="Togo",
+        cordex_domain=cordex_domain,
+        rcp="rcp26",
+        gcm="MPI",
+        rcm="REMO",
+        years_up_to=years_up_to,
+        historical=True,
+        bias_correction=False,
+        dataset="CORDEX-CORE",
+    )
+    print("Single model variables:", list(single.keys()))
+
+    print("Examples completed successfully!")
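A sketch of how the returned structures are navigated once the script above has run; the key names follow the docstring's contract and the "{gcm}-{rcm}" labels the code builds, so treat the specific variable and model names as examples rather than a guaranteed inventory:

# Single-combination call: {variable: DataArray}
tasmax = single["tasmax"]
print(tasmax.dims, tasmax.attrs.get("units"))

# Multi-model call: {rcp: {"GCM-RCM": {variable: DataArray}}}
for rcp_name, model_map in multi.items():
    for model_name, var_map in model_map.items():
        pr = var_map.get("pr")
        if pr is not None:
            print(rcp_name, model_name, float(pr.mean()))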