NREL-reV 0.8.7__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/METADATA +13 -10
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/RECORD +43 -43
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/WHEEL +1 -1
- reV/SAM/SAM.py +217 -133
- reV/SAM/econ.py +18 -14
- reV/SAM/generation.py +611 -422
- reV/SAM/windbos.py +93 -79
- reV/bespoke/bespoke.py +681 -377
- reV/bespoke/cli_bespoke.py +2 -0
- reV/bespoke/place_turbines.py +187 -43
- reV/config/output_request.py +2 -1
- reV/config/project_points.py +218 -140
- reV/econ/econ.py +166 -114
- reV/econ/economies_of_scale.py +91 -45
- reV/generation/base.py +331 -184
- reV/generation/generation.py +326 -200
- reV/generation/output_attributes/lcoe_fcr_inputs.json +38 -3
- reV/handlers/__init__.py +0 -1
- reV/handlers/exclusions.py +16 -15
- reV/handlers/multi_year.py +57 -26
- reV/handlers/outputs.py +6 -5
- reV/handlers/transmission.py +44 -27
- reV/hybrids/hybrid_methods.py +30 -30
- reV/hybrids/hybrids.py +305 -189
- reV/nrwal/nrwal.py +262 -168
- reV/qa_qc/cli_qa_qc.py +14 -10
- reV/qa_qc/qa_qc.py +217 -119
- reV/qa_qc/summary.py +228 -146
- reV/rep_profiles/rep_profiles.py +349 -230
- reV/supply_curve/aggregation.py +349 -188
- reV/supply_curve/competitive_wind_farms.py +90 -48
- reV/supply_curve/exclusions.py +138 -85
- reV/supply_curve/extent.py +75 -50
- reV/supply_curve/points.py +735 -390
- reV/supply_curve/sc_aggregation.py +357 -248
- reV/supply_curve/supply_curve.py +604 -347
- reV/supply_curve/tech_mapping.py +144 -82
- reV/utilities/__init__.py +274 -16
- reV/utilities/pytest_utils.py +8 -4
- reV/version.py +1 -1
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/LICENSE +0 -0
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/entry_points.txt +0 -0
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/top_level.txt +0 -0
reV/supply_curve/aggregation.py
CHANGED
@@ -1,27 +1,29 @@
 # -*- coding: utf-8 -*-
-"""
-
-
+"""reV aggregation framework."""
+
+import logging
+import os
 from abc import ABC, abstractmethod
+
 import h5py
-import logging
 import numpy as np
-import os
 import pandas as pd
+from rex.resource import Resource
+from rex.utilities.execution import SpawnProcessPool
+from rex.utilities.loggers import log_mem
 
-from reV.handlers.outputs import Outputs
 from reV.handlers.exclusions import ExclusionLayers
+from reV.handlers.outputs import Outputs
 from reV.supply_curve.exclusions import ExclusionMaskFromDict
 from reV.supply_curve.extent import SupplyCurveExtent
-from reV.supply_curve.tech_mapping import TechMapping
 from reV.supply_curve.points import AggregationSupplyCurvePoint
-from reV.
-
-from reV.utilities import
-
-
-
-
+from reV.supply_curve.tech_mapping import TechMapping
+from reV.utilities import ResourceMetaField, SupplyCurveField, log_versions
+from reV.utilities.exceptions import (
+    EmptySupplyCurvePointError,
+    FileInputError,
+    SupplyCurveInputError,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -29,8 +31,13 @@ logger = logging.getLogger(__name__)
 class AbstractAggFileHandler(ABC):
     """Simple framework to handle aggregation file context managers."""
 
-    def __init__(
-
+    def __init__(
+        self,
+        excl_fpath,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+    ):
         """
         Parameters
         ----------
@@ -51,9 +58,12 @@ class AbstractAggFileHandler(ABC):
             by default None
         """
         self._excl_fpath = excl_fpath
-        self._excl = ExclusionMaskFromDict(
-
-
+        self._excl = ExclusionMaskFromDict(
+            excl_fpath,
+            layers_dict=excl_dict,
+            min_area=min_area,
+            kernel=area_filter_kernel,
+        )
 
     def __enter__(self):
         return self
@@ -95,9 +105,15 @@ class AggFileHandler(AbstractAggFileHandler):
 
     DEFAULT_H5_HANDLER = Resource
 
-    def __init__(
-
-
+    def __init__(
+        self,
+        excl_fpath,
+        h5_fpath,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        h5_handler=None,
+    ):
         """
         Parameters
         ----------
@@ -121,9 +137,12 @@ class AggFileHandler(AbstractAggFileHandler):
             Optional special handler similar to the rex.Resource handler which
             is default.
         """
-        super().__init__(
-
-
+        super().__init__(
+            excl_fpath,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+        )
 
         if h5_handler is None:
             self._h5 = Resource(h5_fpath)
@@ -152,10 +171,19 @@ class BaseAggregation(ABC):
     """Abstract supply curve points aggregation framework based on only an
     exclusion file and techmap."""
 
-    def __init__(
-
-
-
+    def __init__(
+        self,
+        excl_fpath,
+        tm_dset,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        excl_area=None,
+        res_fpath=None,
+        gids=None,
+        pre_extract_inclusions=False,
+    ):
         """
         Parameters
         ----------
@@ -208,12 +236,15 @@ class BaseAggregation(ABC):
         self._validate_tech_mapping()
 
         if pre_extract_inclusions:
-            self._inclusion_mask =
+            self._inclusion_mask = (
                 ExclusionMaskFromDict.extract_inclusion_mask(
-                    excl_fpath,
+                    excl_fpath,
+                    tm_dset,
                     excl_dict=excl_dict,
                     area_filter_kernel=area_filter_kernel,
-                    min_area=min_area
+                    min_area=min_area,
+                )
+            )
         else:
             self._inclusion_mask = None
 
@@ -228,20 +259,28 @@ class BaseAggregation(ABC):
         if tm_in_excl:
             logger.info('Found techmap "{}".'.format(self._tm_dset))
         elif not tm_in_excl and not excl_fp_is_str:
-            msg = (
-
-
+            msg = (
+                'Could not find techmap dataset "{}" and cannot run '
+                "techmap with arbitrary multiple exclusion filepaths "
+                "to write to: {}".format(self._tm_dset, self._excl_fpath)
+            )
             logger.error(msg)
             raise RuntimeError(msg)
         else:
-            logger.info(
-
+            logger.info(
+                'Could not find techmap "{}". Running techmap module.'.format(
+                    self._tm_dset
+                )
+            )
             try:
-                TechMapping.run(
-
+                TechMapping.run(
+                    self._excl_fpath, self._res_fpath, dset=self._tm_dset
+                )
             except Exception as e:
-                msg = (
-
+                msg = (
+                    "TechMapping process failed. Received the "
+                    "following error:\n{}".format(e)
+                )
                 logger.exception(msg)
                 raise RuntimeError(msg) from e
 
@@ -255,8 +294,9 @@ class BaseAggregation(ABC):
             ndarray
         """
         if self._gids is None:
-            with SupplyCurveExtent(
-
+            with SupplyCurveExtent(
+                self._excl_fpath, resolution=self._resolution
+            ) as sc:
                 self._gids = sc.valid_sc_points(self._tm_dset)
         elif np.issubdtype(type(self._gids), np.number):
             self._gids = np.array([self._gids])
@@ -274,8 +314,9 @@ class BaseAggregation(ABC):
             tuple
         """
         if self._shape is None:
-            with SupplyCurveExtent(
-
+            with SupplyCurveExtent(
+                self._excl_fpath, resolution=self._resolution
+            ) as sc:
                 self._shape = sc.exclusions.shape
 
         return self._shape
@@ -301,14 +342,18 @@ class BaseAggregation(ABC):
             Area of an exclusion pixel in km2
         """
         if excl_area is None:
-            logger.debug(
-
+            logger.debug(
+                "Setting the exclusion area from the area of a pixel "
+                "in {}".format(excl_fpath)
+            )
             with ExclusionLayers(excl_fpath) as excl:
                 excl_area = excl.pixel_area
 
         if excl_area is None:
-            e = (
-
+            e = (
+                "No exclusion pixel area was input and could not parse "
+                "area from the exclusion file attributes!"
+            )
             logger.error(e)
             raise SupplyCurveInputError(e)
 
@@ -337,14 +382,17 @@ class BaseAggregation(ABC):
         elif isinstance(inclusion_mask, np.ndarray):
             assert inclusion_mask.shape == excl_shape
         elif inclusion_mask is not None:
-            msg = (
-
+            msg = (
+                "Expected inclusion_mask to be dict or array but received "
+                "{}".format(type(inclusion_mask))
+            )
             logger.error(msg)
             raise SupplyCurveInputError(msg)
 
     @staticmethod
-    def _get_gid_inclusion_mask(
-
+    def _get_gid_inclusion_mask(
+        inclusion_mask, gid, slice_lookup, resolution=64
+    ):
         """
         Get inclusion mask for desired gid
 
@@ -381,8 +429,10 @@ class BaseAggregation(ABC):
             row_slice, col_slice = slice_lookup[gid]
             gid_inclusions = inclusion_mask[row_slice, col_slice]
         elif inclusion_mask is not None:
-            msg = (
-
+            msg = (
+                "Expected inclusion_mask to be dict or array but received "
+                "{}".format(type(inclusion_mask))
+            )
             logger.error(msg)
             raise SupplyCurveInputError(msg)
 
@@ -407,26 +457,34 @@ class BaseAggregation(ABC):
             generation run.
         """
 
-        if gen_fpath.endswith(
+        if gen_fpath.endswith(".h5"):
             with Resource(gen_fpath) as f:
                 gen_index = f.meta
-        elif gen_fpath.endswith(
+        elif gen_fpath.endswith(".csv"):
             gen_index = pd.read_csv(gen_fpath)
         else:
-            msg = (
-
-
+            msg = (
+                "Could not recognize gen_fpath input, needs to be reV gen "
+                "output h5 or project points csv but received: {}".format(
+                    gen_fpath
+                )
+            )
             logger.error(msg)
             raise FileInputError(msg)
 
-        if
-            gen_index = gen_index.rename(
-
-
-            gen_index = gen_index.
-            gen_index =
-
-
+        if ResourceMetaField.GID in gen_index:
+            gen_index = gen_index.rename(
+                columns={ResourceMetaField.GID: SupplyCurveField.RES_GIDS}
+            )
+            gen_index[SupplyCurveField.GEN_GIDS] = gen_index.index
+            gen_index = gen_index[
+                [SupplyCurveField.RES_GIDS, SupplyCurveField.GEN_GIDS]
+            ]
+            gen_index = gen_index.set_index(keys=SupplyCurveField.RES_GIDS)
+            gen_index = gen_index.reindex(
+                range(int(gen_index.index.max() + 1))
+            )
+            gen_index = gen_index[SupplyCurveField.GEN_GIDS].values
             gen_index[np.isnan(gen_index)] = -1
             gen_index = gen_index.astype(np.int32)
         else:
@@ -439,10 +497,19 @@ class Aggregation(BaseAggregation):
     """Concrete but generalized aggregation framework to aggregate ANY reV h5
     file to a supply curve grid (based on an aggregated exclusion grid)."""
 
-    def __init__(
-
-
-
+    def __init__(
+        self,
+        excl_fpath,
+        tm_dset,
+        *agg_dset,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        excl_area=None,
+        gids=None,
+        pre_extract_inclusions=False,
+    ):
         """
         Parameters
         ----------
@@ -486,18 +553,24 @@ class Aggregation(BaseAggregation):
             the inclusion mask on the fly with parallel workers.
         """
         log_versions(logger)
-        logger.info(
-        logger.debug(
-        logger.debug(
-
-        super().__init__(
-
-
-
-
+        logger.info("Initializing Aggregation...")
+        logger.debug("Exclusion filepath: {}".format(excl_fpath))
+        logger.debug("Exclusion dict: {}".format(excl_dict))
+
+        super().__init__(
+            excl_fpath,
+            tm_dset,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+            resolution=resolution,
+            excl_area=excl_area,
+            gids=gids,
+            pre_extract_inclusions=pre_extract_inclusions,
+        )
 
         if isinstance(agg_dset, str):
-            agg_dset = (agg_dset,
+            agg_dset = (agg_dset,)
 
         self._agg_dsets = agg_dset
 
@@ -505,33 +578,51 @@ class Aggregation(BaseAggregation):
         """Do a preflight check on input files"""
 
        if not os.path.exists(self._excl_fpath):
-            raise FileNotFoundError(
-
+            raise FileNotFoundError(
+                "Could not find required exclusions file: " "{}".format(
+                    self._excl_fpath
+                )
+            )
 
         if not os.path.exists(h5_fpath):
-            raise FileNotFoundError(
-
+            raise FileNotFoundError(
+                "Could not find required h5 file: " "{}".format(h5_fpath)
+            )
 
-        with h5py.File(self._excl_fpath,
+        with h5py.File(self._excl_fpath, "r") as f:
             if self._tm_dset not in f:
-                raise FileInputError(
-
-
-
+                raise FileInputError(
+                    'Could not find techmap dataset "{}" '
+                    "in exclusions file: {}".format(
+                        self._tm_dset, self._excl_fpath
+                    )
+                )
 
         with Resource(h5_fpath) as f:
             for dset in self._agg_dsets:
                 if dset not in f:
-                    raise FileInputError(
-
-
+                    raise FileInputError(
+                        'Could not find provided dataset "{}"'
+                        " in h5 file: {}".format(dset, h5_fpath)
+                    )
 
     @classmethod
-    def run_serial(
-
-
-
-
+    def run_serial(
+        cls,
+        excl_fpath,
+        h5_fpath,
+        tm_dset,
+        *agg_dset,
+        agg_method="mean",
+        excl_dict=None,
+        inclusion_mask=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        excl_area=0.0081,
+        gids=None,
+        gen_index=None,
+    ):
         """
         Standalone method to aggregate - can be parallelized.
 
@@ -602,17 +693,22 @@ class Aggregation(BaseAggregation):
         cls._check_inclusion_mask(inclusion_mask, gids, exclusion_shape)
 
         # pre-extract handlers so they are not repeatedly initialized
-        file_kwargs = {
-
-
-
+        file_kwargs = {
+            "excl_dict": excl_dict,
+            "area_filter_kernel": area_filter_kernel,
+            "min_area": min_area,
+        }
+        dsets = (
+            *agg_dset,
+            "meta",
+        )
         agg_out = {ds: [] for ds in dsets}
         with AggFileHandler(excl_fpath, h5_fpath, **file_kwargs) as fh:
             n_finished = 0
             for gid in gids:
                 gid_inclusions = cls._get_gid_inclusion_mask(
-                    inclusion_mask, gid, slice_lookup,
-
+                    inclusion_mask, gid, slice_lookup, resolution=resolution
+                )
                 try:
                     gid_out = AggregationSupplyCurvePoint.run(
                         gid,
@@ -627,28 +723,40 @@ class Aggregation(BaseAggregation):
                         excl_area=excl_area,
                         exclusion_shape=exclusion_shape,
                         close=False,
-                        gen_index=gen_index
+                        gen_index=gen_index,
+                    )
 
                 except EmptySupplyCurvePointError:
-                    logger.debug(
-
+                    logger.debug(
+                        "SC gid {} is fully excluded or does not "
+                        "have any valid source data!".format(gid)
+                    )
                 except Exception as e:
-                    msg =
+                    msg = "SC gid {} failed!".format(gid)
                     logger.exception(msg)
                     raise RuntimeError(msg) from e
                 else:
                     n_finished += 1
-                    logger.debug(
-
-
+                    logger.debug(
+                        "Serial aggregation: "
+                        "{} out of {} points complete".format(
+                            n_finished, len(gids)
+                        )
+                    )
                     log_mem(logger)
                     for k, v in gid_out.items():
                         agg_out[k].append(v)
 
         return agg_out
 
-    def run_parallel(
-
+    def run_parallel(
+        self,
+        h5_fpath,
+        agg_method="mean",
+        excl_area=None,
+        max_workers=None,
+        sites_per_worker=100,
+    ):
         """
         Aggregate in parallel
 
@@ -681,22 +789,29 @@ class Aggregation(BaseAggregation):
         chunks = np.array_split(self.gids, chunks)
 
         if self._inclusion_mask is not None:
-            with SupplyCurveExtent(
-
+            with SupplyCurveExtent(
+                self._excl_fpath, resolution=self._resolution
+            ) as sc:
                 assert sc.exclusions.shape == self._inclusion_mask.shape
                 slice_lookup = sc.get_slice_lookup(self.gids)
 
-        logger.info(
-
-
-
-
+        logger.info(
+            "Running supply curve point aggregation for "
+            "points {} through {} at a resolution of {} "
+            "on {} cores in {} chunks.".format(
+                self.gids[0],
+                self.gids[-1],
+                self._resolution,
+                max_workers,
+                len(chunks),
+            )
+        )
 
         n_finished = 0
         futures = []
-        dsets = self._agg_dsets + (
+        dsets = self._agg_dsets + ("meta",)
         agg_out = {ds: [] for ds in dsets}
-        loggers = [__name__,
+        loggers = [__name__, "reV.supply_curve.points", "reV"]
         with SpawnProcessPool(max_workers=max_workers, loggers=loggers) as exe:
             # iterate through split executions, submitting each to worker
             for gid_set in chunks:
@@ -709,36 +824,45 @@ class Aggregation(BaseAggregation):
                         chunk_incl_masks[gid] = self._inclusion_mask[rs, cs]
 
                 # submit executions and append to futures list
-                futures.append(
-
-
-
-
-
-
-
-
-
-
-
-
-
+                futures.append(
+                    exe.submit(
+                        self.run_serial,
+                        self._excl_fpath,
+                        h5_fpath,
+                        self._tm_dset,
+                        *self._agg_dsets,
+                        agg_method=agg_method,
+                        excl_dict=self._excl_dict,
+                        inclusion_mask=chunk_incl_masks,
+                        area_filter_kernel=self._area_filter_kernel,
+                        min_area=self._min_area,
+                        resolution=self._resolution,
+                        excl_area=excl_area,
+                        gids=gid_set,
+                        gen_index=gen_index,
+                    )
+                )
 
             # gather results
             for future in futures:
                 n_finished += 1
-                logger.info(
-
-
+                logger.info(
+                    "Parallel aggregation futures collected: "
+                    "{} out of {}".format(n_finished, len(chunks))
+                )
                 for k, v in future.result().items():
                     if v:
                         agg_out[k].extend(v)
 
         return agg_out
 
-    def aggregate(
-
+    def aggregate(
+        self,
+        h5_fpath,
+        agg_method="mean",
+        max_workers=None,
+        sites_per_worker=100,
+    ):
         """
         Aggregate with given agg_method
 
@@ -766,38 +890,44 @@ class Aggregation(BaseAggregation):
         if max_workers == 1:
             self._check_files(h5_fpath)
             gen_index = self._parse_gen_index(h5_fpath)
-            agg = self.run_serial(
-
-
-
-
-
-
-
-
-
-
-
-
+            agg = self.run_serial(
+                self._excl_fpath,
+                h5_fpath,
+                self._tm_dset,
+                *self._agg_dsets,
+                agg_method=agg_method,
+                excl_dict=self._excl_dict,
+                gids=self.gids,
+                inclusion_mask=self._inclusion_mask,
+                area_filter_kernel=self._area_filter_kernel,
+                min_area=self._min_area,
+                resolution=self._resolution,
+                excl_area=self._excl_area,
+                gen_index=gen_index,
+            )
         else:
-            agg = self.run_parallel(
-
-
-
-
-
-
-
-
+            agg = self.run_parallel(
+                h5_fpath=h5_fpath,
+                agg_method=agg_method,
+                excl_area=self._excl_area,
+                max_workers=max_workers,
+                sites_per_worker=sites_per_worker,
+            )
+
+        if not agg["meta"]:
+            e = (
+                "Supply curve aggregation found no non-excluded SC points. "
+                "Please check your exclusions or subset SC GID selection."
+            )
             logger.error(e)
             raise EmptySupplyCurvePointError(e)
 
         for k, v in agg.items():
-            if k ==
+            if k == "meta":
                 v = pd.concat(v, axis=1).T
-                v = v.sort_values(
+                v = v.sort_values(SupplyCurveField.SC_POINT_GID)
                 v = v.reset_index(drop=True)
-                v.index.name =
+                v.index.name = SupplyCurveField.SC_GID
                 agg[k] = v
             else:
                 v = np.dstack(v)[0]
@@ -821,7 +951,7 @@ class Aggregation(BaseAggregation):
             Aggregated values for each aggregation dataset
         """
         agg_out = aggregation.copy()
-        meta = agg_out.pop(
+        meta = agg_out.pop("meta").reset_index()
         for c in meta.columns:
             try:
                 meta[c] = pd.to_numeric(meta[c])
@@ -840,7 +970,7 @@ class Aggregation(BaseAggregation):
                 shape = data.shape
                 shapes[dset] = shape
                 if len(data.shape) == 2:
-                    if (
+                    if ("time_index" in f) and (shape[0] == f.shape[0]):
                         if time_index is None:
                             time_index = f.time_index
 
@@ -849,19 +979,40 @@ class Aggregation(BaseAggregation):
                 chunks[dset] = chunk
                 dtypes[dset] = dtype
 
-        Outputs.init_h5(
-
-
-
+        Outputs.init_h5(
+            out_fpath,
+            dsets,
+            shapes,
+            attrs,
+            chunks,
+            dtypes,
+            meta,
+            time_index=time_index,
+        )
+
+        with Outputs(out_fpath, mode="a") as out:
             for dset, data in agg_out.items():
                 out[dset] = data
 
     @classmethod
-    def run(
-
-
-
-
+    def run(
+        cls,
+        excl_fpath,
+        h5_fpath,
+        tm_dset,
+        *agg_dset,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        excl_area=None,
+        gids=None,
+        pre_extract_inclusions=False,
+        agg_method="mean",
+        max_workers=None,
+        sites_per_worker=100,
+        out_fpath=None,
+    ):
         """Get the supply curve points aggregation summary.
 
         Parameters
@@ -923,15 +1074,25 @@ class Aggregation(BaseAggregation):
             Aggregated values for each aggregation dataset
         """
 
-        agg = cls(
-
-
-
-
-
-
-
-
+        agg = cls(
+            excl_fpath,
+            tm_dset,
+            *agg_dset,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+            resolution=resolution,
+            excl_area=excl_area,
+            gids=gids,
+            pre_extract_inclusions=pre_extract_inclusions,
+        )
+
+        aggregation = agg.aggregate(
+            h5_fpath=h5_fpath,
+            agg_method=agg_method,
+            max_workers=max_workers,
+            sites_per_worker=sites_per_worker,
+        )
 
         if out_fpath is not None:
             agg.save_agg_to_h5(h5_fpath, out_fpath, aggregation)