NREL-reV: NREL_reV-0.8.6-py3-none-any.whl → NREL_reV-0.8.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/METADATA +12 -10
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/RECORD +38 -38
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/WHEEL +1 -1
- reV/SAM/SAM.py +182 -133
- reV/SAM/econ.py +18 -14
- reV/SAM/generation.py +640 -414
- reV/SAM/windbos.py +93 -79
- reV/bespoke/bespoke.py +690 -445
- reV/bespoke/place_turbines.py +6 -6
- reV/config/project_points.py +220 -140
- reV/econ/econ.py +165 -113
- reV/econ/economies_of_scale.py +57 -34
- reV/generation/base.py +310 -183
- reV/generation/generation.py +309 -191
- reV/handlers/exclusions.py +16 -15
- reV/handlers/multi_year.py +12 -9
- reV/handlers/outputs.py +6 -5
- reV/hybrids/hybrid_methods.py +28 -30
- reV/hybrids/hybrids.py +304 -188
- reV/nrwal/nrwal.py +262 -168
- reV/qa_qc/cli_qa_qc.py +14 -10
- reV/qa_qc/qa_qc.py +217 -119
- reV/qa_qc/summary.py +228 -146
- reV/rep_profiles/rep_profiles.py +349 -230
- reV/supply_curve/aggregation.py +349 -188
- reV/supply_curve/competitive_wind_farms.py +90 -48
- reV/supply_curve/exclusions.py +138 -85
- reV/supply_curve/extent.py +75 -50
- reV/supply_curve/points.py +620 -295
- reV/supply_curve/sc_aggregation.py +396 -226
- reV/supply_curve/supply_curve.py +505 -308
- reV/supply_curve/tech_mapping.py +144 -82
- reV/utilities/__init__.py +199 -16
- reV/utilities/pytest_utils.py +8 -4
- reV/version.py +1 -1
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/LICENSE +0 -0
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/entry_points.txt +0 -0
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/top_level.txt +0 -0
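The hunks below all come from reV/supply_curve/sc_aggregation.py (listed above with +396 -226). Before comparing an installed copy against this diff, the installed version can be checked with a minimal sketch like the following; it assumes the version string is exposed as `__version__` in reV/version.py (the module bumped +1 -1 in this release):

```python
# Hedged sketch: confirm which of the two wheels is installed.
# Assumes reV/version.py exposes __version__ (not verified against this diff).
from reV.version import __version__

print(__version__)  # expect "0.8.6" before the upgrade and "0.8.9" after
```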
@@ -6,29 +6,35 @@ Created on Fri Jun 21 13:24:31 2019
 
 @author: gbuster
 """
-from concurrent.futures import as_completed
 import logging
-import numpy as np
-import psutil
 import os
-
+from concurrent.futures import as_completed
 from warnings import warn
 
+import numpy as np
+import pandas as pd
+import psutil
+from rex.multi_file_resource import MultiFileResource
+from rex.resource import Resource
+from rex.utilities.execution import SpawnProcessPool
+
 from reV.generation.base import BaseGen
 from reV.handlers.exclusions import ExclusionLayers
-from reV.supply_curve.aggregation import (
-
+from reV.supply_curve.aggregation import (
+    AbstractAggFileHandler,
+    Aggregation,
+    BaseAggregation,
+)
 from reV.supply_curve.exclusions import FrictionMask
 from reV.supply_curve.extent import SupplyCurveExtent
 from reV.supply_curve.points import GenerationSupplyCurvePoint
-from reV.utilities
-
-
-
-
-
-
-from rex.utilities.execution import SpawnProcessPool
+from reV.utilities import SupplyCurveField, log_versions
+from reV.utilities.exceptions import (
+    EmptySupplyCurvePointError,
+    FileInputError,
+    InputWarning,
+    OutputWarning,
+)
 
 logger = logging.getLogger(__name__)
 
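The import hunk above swaps hard-coded column names for members of the new `SupplyCurveField` from `reV.utilities`; later hunks use `SupplyCurveField.GID`, `SC_POINT_GID`, `SC_ROW_IND`, `SC_COL_IND`, and `SC_GID`. A hedged sketch of the same pattern in downstream code, assuming the members can be used directly as pandas column keys (which is how the hunks below use them):

```python
# Hedged sketch: key a summary DataFrame by named supply-curve fields
# instead of literal strings, mirroring the usage later in this diff.
import pandas as pd

from reV.utilities import SupplyCurveField

summary = pd.DataFrame({SupplyCurveField.SC_POINT_GID: [0, 1, 2],
                        "res_class": [0, 0, 1]})
summary.index.name = SupplyCurveField.SC_GID
print(summary.columns.tolist())
```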
@@ -43,10 +49,19 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
     - variable power density .csv (optional)
     """
 
-    def __init__(
-
-
-
+    def __init__(
+        self,
+        excl_fpath,
+        gen_fpath,
+        econ_fpath=None,
+        data_layers=None,
+        power_density=None,
+        excl_dict=None,
+        friction_fpath=None,
+        friction_dset=None,
+        area_filter_kernel="queen",
+        min_area=None,
+    ):
         """
         Parameters
         ----------
@@ -89,9 +104,12 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
         min_area : float | None
             Minimum required contiguous area filter in sq-km
         """
-        super().__init__(
-
-
+        super().__init__(
+            excl_fpath,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+        )
 
         self._gen = self._open_gen_econ_resource(gen_fpath, econ_fpath)
         # pre-initialize the resource meta data
@@ -106,7 +124,7 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
            self._friction_layer = FrictionMask(friction_fpath, friction_dset)
 
            if not np.all(self._friction_layer.shape == self._excl.shape):
-                e = (
+                e = ("Friction layer shape {} must match exclusions shape {}!"
                     .format(self._friction_layer.shape, self._excl.shape))
                logger.error(e)
                raise FileInputError(e)
@@ -132,14 +150,15 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
        """
 
        handler = None
-        is_gen_h5 = isinstance(gen_fpath, str) and gen_fpath.endswith(
-        is_econ_h5 = isinstance(econ_fpath, str) and econ_fpath.endswith(
+        is_gen_h5 = isinstance(gen_fpath, str) and gen_fpath.endswith(".h5")
+        is_econ_h5 = isinstance(econ_fpath, str) and econ_fpath.endswith(".h5")
 
        if is_gen_h5 and not is_econ_h5:
            handler = Resource(gen_fpath)
        elif is_gen_h5 and is_econ_h5:
-            handler = MultiFileResource(
-
+            handler = MultiFileResource(
+                [gen_fpath, econ_fpath], check_files=True
+            )
 
        return handler
 
@@ -149,20 +168,24 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
        if isinstance(self._power_density, str):
            self._pdf = self._power_density
 
-            if self._pdf.endswith(
+            if self._pdf.endswith(".csv"):
                self._power_density = pd.read_csv(self._pdf)
-                if (
+                if (SupplyCurveField.GID in self._power_density
                        and 'power_density' in self._power_density):
-                    self._power_density =
+                    self._power_density = \
+                        self._power_density.set_index(SupplyCurveField.GID)
                else:
-                    msg = ('Variable power density file must include "
+                    msg = ('Variable power density file must include "{}" '
                           'and "power_density" columns, but received: {}'
-                           .format(
+                           .format(SupplyCurveField.GID,
                                   self._power_density.columns.values))
                    logger.error(msg)
                    raise FileInputError(msg)
            else:
-                msg = (
-
+                msg = (
+                    "Variable power density file must be csv but received: "
+                    "{}".format(self._pdf)
+                )
                logger.error(msg)
                raise FileInputError(msg)
 
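The `_parse_power_density` hunk above now requires the variable power density CSV to carry the `SupplyCurveField.GID` column plus a `power_density` column. A hedged sketch of building such a file, using the enum member itself so the literal column label does not have to be guessed (the values and units, MW/km2, are illustrative only):

```python
# Hedged sketch of a variable power density table that satisfies the check
# above: one row per resource gid with a power_density value.
import pandas as pd

from reV.utilities import SupplyCurveField

pdf = pd.DataFrame({SupplyCurveField.GID: [0, 1, 2],
                    "power_density": [36.0, 36.0, 3.0]})
pdf.to_csv("variable_power_density.csv", index=False)
```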
@@ -231,7 +254,7 @@ class SupplyCurveAggregation(BaseAggregation):
                 lcoe_dset='lcoe_fcr-means', h5_dsets=None, data_layers=None,
                 power_density=None, friction_fpath=None, friction_dset=None,
                 cap_cost_scale=None, recalc_lcoe=True):
-        """
+        r"""ReV supply curve points aggregation framework.
 
        ``reV`` supply curve aggregation combines a high-resolution
        (e.g. 90m) exclusion dataset with a (typically) lower resolution
@@ -327,6 +350,13 @@ class SupplyCurveAggregation(BaseAggregation):
                    "more_developable_land": {
                        "force_include_range": [5, 10]
                    },
+                    "viewsheds": {
+                        "exclude_values": 1,
+                        "extent": {
+                            "layer": "federal_parks",
+                            "include_range": [1, 5]
+                        }
+                    }
                    ...
                }
 
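The docstring hunk above adds an example exclusion layer ("viewsheds") whose new `extent` sub-dictionary limits where that exclusion applies. A hedged sketch of an `excl_dict` built the same way (layer names are the illustrative ones from the docstring, not shipped defaults):

```python
# Hedged sketch mirroring the docstring example above: "viewsheds" is only
# excluded where the "federal_parks" layer falls in the range [1, 5].
excl_dict = {
    "more_developable_land": {"force_include_range": [5, 10]},
    "viewsheds": {
        "exclude_values": 1,
        "extent": {"layer": "federal_parks", "include_range": [1, 5]},
    },
}
```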
@@ -412,16 +442,29 @@ class SupplyCurveAggregation(BaseAggregation):
            are aggregated. By default, ``None``.
        cf_dset : str, optional
            Dataset name from the ``reV`` generation HDF5 output file
-            containing capacity factor
-
+            containing a 1D dataset of mean capacity factor values. This
+            dataset will be mapped onto the high-resolution grid and
+            used to compute the mean capacity factor for non-excluded
+            area. By default, ``"cf_mean-means"``.
        lcoe_dset : str, optional
            Dataset name from the ``reV`` generation HDF5 output file
-            containing
+            containing a 1D dataset of mean LCOE values. This
+            dataset will be mapped onto the high-resolution grid and
+            used to compute the mean LCOE for non-excluded area, but
+            only if the LCOE is not re-computed during processing (see
+            the `recalc_lcoe` input for more info).
            By default, ``"lcoe_fcr-means"``.
        h5_dsets : list, optional
            Optional list of additional datasets from the ``reV``
            generation/econ HDF5 output file to aggregate. If ``None``,
-            no extra datasets are aggregated.
+            no extra datasets are aggregated.
+
+            .. WARNING:: This input is meant for passing through 1D
+               datasets. If you specify a 2D or higher-dimensional
+               dataset, you may run into memory errors. If you wish to
+               aggregate 2D datasets, see the rep-profiles module.
+
+            By default, ``None``.
        data_layers : dict, optional
            Dictionary of aggregation data layers of the format::
 
@@ -498,7 +541,10 @@ class SupplyCurveAggregation(BaseAggregation):
            - ``fixed_operating_cost``
            - ``variable_operating_cost``
 
-
+            If any of these datasets are missing from the ``reV``
+            generation HDF5 output, or if `recalc_lcoe` is set to
+            ``False``, the mean LCOE will be computed from the data
+            stored under the `lcoe_dset` instead. By default, ``True``.
 
        Examples
        --------
@@ -634,15 +680,22 @@ class SupplyCurveAggregation(BaseAggregation):
            associated with all pixels with that unique value.
        """
        log_versions(logger)
-        logger.info(
-        logger.debug(
-        logger.debug(
-
-        super().__init__(
-
-
-
-
+        logger.info("Initializing SupplyCurveAggregation...")
+        logger.debug("Exclusion filepath: {}".format(excl_fpath))
+        logger.debug("Exclusion dict: {}".format(excl_dict))
+
+        super().__init__(
+            excl_fpath,
+            tm_dset,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+            resolution=resolution,
+            excl_area=excl_area,
+            res_fpath=res_fpath,
+            gids=gids,
+            pre_extract_inclusions=pre_extract_inclusions,
+        )
 
        self._econ_fpath = econ_fpath
        self._res_class_dset = res_class_dset
@@ -657,7 +710,7 @@ class SupplyCurveAggregation(BaseAggregation):
        self._data_layers = data_layers
        self._recalc_lcoe = recalc_lcoe
 
-        logger.debug(
+        logger.debug("Resource class bins: {}".format(self._res_class_bins))
 
        if self._cap_cost_scale is not None:
            if self._h5_dsets is None:
@@ -667,16 +720,20 @@ class SupplyCurveAggregation(BaseAggregation):
            self._h5_dsets = list(set(self._h5_dsets))
 
        if self._power_density is None:
-            msg = (
-
-
+            msg = (
+                "Supply curve aggregation power density not specified. "
+                "Will try to infer based on lookup table: {}".format(
+                    GenerationSupplyCurvePoint.POWER_DENSITY
+                )
+            )
            logger.warning(msg)
            warn(msg, InputWarning)
 
        self._check_data_layers()
 
-    def _check_data_layers(
-
+    def _check_data_layers(
+        self, methods=("mean", "max", "min", "mode", "sum", "category")
+    ):
        """Run pre-flight checks on requested aggregation data layers.
 
        Parameters
@@ -686,40 +743,49 @@ class SupplyCurveAggregation(BaseAggregation):
        """
 
        if self._data_layers is not None:
-            logger.debug(
+            logger.debug("Checking data layers...")
 
            with ExclusionLayers(self._excl_fpath) as f:
                shape_base = f.shape
 
            for k, v in self._data_layers.items():
-                if
-                    raise KeyError(
-
-
-
-
-
-
-
-
-                if
-
+                if "dset" not in v:
+                    raise KeyError(
+                        'Data aggregation "dset" data layer "{}" '
+                        "must be specified.".format(k)
+                    )
+                if "method" not in v:
+                    raise KeyError(
+                        'Data aggregation "method" data layer "{}" '
+                        "must be specified.".format(k)
+                    )
+                if v["method"].lower() not in methods:
+                    raise ValueError(
+                        "Cannot recognize data layer agg method: "
+                        '"{}". Can only do: {}.'.format(v["method"], methods)
+                    )
+                if "fpath" in v:
+                    with ExclusionLayers(v["fpath"]) as f:
                        try:
                            mismatched_shapes = any(f.shape != shape_base)
                        except TypeError:
                            mismatched_shapes = f.shape != shape_base
                        if mismatched_shapes:
-                            msg = (
-
-
-
+                            msg = (
+                                'Data shape of data layer "{}" is {}, '
+                                "which does not match the baseline "
+                                "exclusions shape {}.".format(
+                                    k, f.shape, shape_base
+                                )
+                            )
                            raise FileInputError(msg)
 
-            logger.debug(
+            logger.debug("Finished checking data layers.")
 
    @staticmethod
-    def _get_res_gen_lcoe_data(
-
+    def _get_res_gen_lcoe_data(
+        gen, res_class_dset, res_class_bins, cf_dset, lcoe_dset
+    ):
        """Extract the basic resource / generation / lcoe data to be used in
        the aggregation process.
 
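`_check_data_layers` above requires each data layer to carry a "dset" and a "method" key, with the method drawn from mean/max/min/mode/sum/category, and optionally an "fpath" pointing at an HDF5 file whose shape matches the exclusions. A hedged sketch of a `data_layers` input that would pass those checks (layer, dataset, and file names are placeholders):

```python
# Hedged sketch of a data_layers dict that satisfies _check_data_layers:
# every entry names a dataset and an allowed aggregation method; "fpath" is
# optional and, when given, must match the exclusions shape.
data_layers = {
    "slope": {"dset": "srtm_slope", "method": "mean"},
    "land_class": {"dset": "nlcd", "method": "category",
                   "fpath": "other_exclusions.h5"},
}
```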
@@ -753,7 +819,7 @@ class SupplyCurveAggregation(BaseAggregation):
 
        dset_list = (res_class_dset, cf_dset, lcoe_dset)
        gen_dsets = [] if gen is None else gen.datasets
-        labels = (
+        labels = ("res_class_dset", "cf_dset", "lcoe_dset")
        temp = [None, None, None]
 
        if isinstance(gen, Resource):
@@ -761,18 +827,23 @@ class SupplyCurveAggregation(BaseAggregation):
        elif isinstance(gen, MultiFileResource):
            source_fps = gen._h5_files
        else:
-            msg =
-
+            msg = 'Did not recognize gen object input of type "{}": {}'.format(
+                type(gen), gen
+            )
            logger.error(msg)
            raise TypeError(msg)
 
        for i, dset in enumerate(dset_list):
            if dset in gen_dsets:
+                _warn_about_large_datasets(gen, dset)
                temp[i] = gen[dset]
            elif dset not in gen_dsets and dset is not None:
-                w = (
-
-
+                w = (
+                    'Could not find "{}" input as "{}" in source files: {}. '
+                    "Available datasets: {}".format(
+                        labels[i], dset, source_fps, gen_dsets
+                    )
+                )
                logger.warning(w)
                warn(w, OutputWarning)
 
@@ -808,8 +879,10 @@ class SupplyCurveAggregation(BaseAggregation):
        # look for the datasets required by the LCOE re-calculation and make
        # lists of the missing datasets
        gen_dsets = [] if gen is None else gen.datasets
-        lcoe_recalc_req = ('fixed_charge_rate',
-                           '
+        lcoe_recalc_req = ('fixed_charge_rate',
+                           'capital_cost',
+                           'fixed_operating_cost',
+                           'variable_operating_cost',
                           'system_capacity')
        missing_lcoe_source = [k for k in lcoe_recalc_req
                               if k not in gen_dsets]
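The hunk above spells out the datasets required to re-calculate LCOE: fixed_charge_rate, capital_cost, fixed_operating_cost, variable_operating_cost, and system_capacity. The warnings in the next hunk suggest passing these through via `h5_dsets` for multi-year runs so the mean LCOE can be recomputed from the multi-year mean capacity factor; a minimal sketch of that request:

```python
# Hedged sketch: pass through the LCOE re-calculation inputs so multi-year
# mean LCOE can be recomputed from the mean CF, as the warnings below advise.
h5_dsets = [
    "fixed_charge_rate",
    "capital_cost",
    "fixed_operating_cost",
    "variable_operating_cost",
    "system_capacity",
]
```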
@@ -820,61 +893,91 @@ class SupplyCurveAggregation(BaseAggregation):
        elif isinstance(gen, MultiFileResource):
            source_fps = gen._h5_files
        else:
-            msg =
-
+            msg = 'Did not recognize gen object input of type "{}": {}'.format(
+                type(gen), gen
+            )
            logger.error(msg)
            raise TypeError(msg)
 
        h5_dsets_data = None
        if h5_dsets is not None:
-            missing_lcoe_request = [
-
+            missing_lcoe_request = [
+                k for k in lcoe_recalc_req if k not in h5_dsets
+            ]
 
            if not isinstance(h5_dsets, (list, tuple)):
-                e = (
-
+                e = (
+                    "Additional h5_dsets argument must be a list or tuple "
+                    "but received: {} {}".format(type(h5_dsets), h5_dsets)
+                )
                logger.error(e)
                raise TypeError(e)
 
            missing_h5_dsets = [k for k in h5_dsets if k not in gen_dsets]
            if any(missing_h5_dsets):
-                msg = (
-
-
+                msg = (
+                    'Could not find requested h5_dsets "{}" in '
+                    "source files: {}. Available datasets: {}".format(
+                        missing_h5_dsets, source_fps, gen_dsets
+                    )
+                )
                logger.error(msg)
                raise FileInputError(msg)
 
            h5_dsets_data = {dset: gen[dset] for dset in h5_dsets}
 
        if any(missing_lcoe_source):
-            msg = (
-
-
-
-
-
+            msg = (
+                "Could not find the datasets in the gen source file that "
+                "are required to re-calculate the multi-year LCOE. If you "
+                "are running a multi-year job, it is strongly suggested "
+                "you pass through these datasets to re-calculate the LCOE "
+                "from the multi-year mean CF: {}".format(missing_lcoe_source)
+            )
            logger.warning(msg)
            warn(msg, InputWarning)
 
        if any(missing_lcoe_request):
-            msg = (
-
-
-
+            msg = (
+                "It is strongly advised that you include the following "
+                "datasets in the h5_dsets request in order to re-calculate "
+                "the LCOE from the multi-year mean CF and AEP: {}".format(
+                    missing_lcoe_request
+                )
+            )
            logger.warning(msg)
            warn(msg, InputWarning)
 
        return h5_dsets_data
 
    @classmethod
-    def run_serial(
-
-
-
-
-
-
-
+    def run_serial(
+        cls,
+        excl_fpath,
+        gen_fpath,
+        tm_dset,
+        gen_index,
+        econ_fpath=None,
+        excl_dict=None,
+        inclusion_mask=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        gids=None,
+        args=None,
+        res_class_dset=None,
+        res_class_bins=None,
+        cf_dset="cf_mean-means",
+        lcoe_dset="lcoe_fcr-means",
+        h5_dsets=None,
+        data_layers=None,
+        power_density=None,
+        friction_fpath=None,
+        friction_dset=None,
+        excl_area=None,
+        cap_cost_scale=None,
+        recalc_lcoe=True,
+    ):
        """Standalone method to create agg summary - can be parallelized.
 
        Parameters
@@ -985,34 +1088,38 @@ class SupplyCurveAggregation(BaseAggregation):
 
            slice_lookup = sc.get_slice_lookup(gids)
 
-        logger.debug(
-
+        logger.debug(
+            "Starting SupplyCurveAggregation serial with "
+            "supply curve {} gids".format(len(gids))
+        )
 
        cls._check_inclusion_mask(inclusion_mask, gids, exclusion_shape)
 
        # pre-extract handlers so they are not repeatedly initialized
-        file_kwargs = {
-
-
-
-
-
-
-
-
-
-
-
-
-
+        file_kwargs = {
+            "econ_fpath": econ_fpath,
+            "data_layers": data_layers,
+            "power_density": power_density,
+            "excl_dict": excl_dict,
+            "area_filter_kernel": area_filter_kernel,
+            "min_area": min_area,
+            "friction_fpath": friction_fpath,
+            "friction_dset": friction_dset,
+        }
+        with SupplyCurveAggFileHandler(
+            excl_fpath, gen_fpath, **file_kwargs
+        ) as fh:
+            temp = cls._get_res_gen_lcoe_data(
+                fh.gen, res_class_dset, res_class_bins, cf_dset, lcoe_dset
+            )
            res_data, res_class_bins, cf_data, lcoe_data = temp
            h5_dsets_data = cls._get_extra_dsets(fh.gen, h5_dsets)
 
            n_finished = 0
            for gid in gids:
                gid_inclusions = cls._get_gid_inclusion_mask(
-                    inclusion_mask, gid, slice_lookup,
-
+                    inclusion_mask, gid, slice_lookup, resolution=resolution
+                )
 
                for ri, res_bin in enumerate(res_class_bins):
                    try:
@@ -1038,27 +1145,34 @@ class SupplyCurveAggregation(BaseAggregation):
                            close=False,
                            friction_layer=fh.friction_layer,
                            cap_cost_scale=cap_cost_scale,
-                            recalc_lcoe=recalc_lcoe
+                            recalc_lcoe=recalc_lcoe,
+                        )
 
                    except EmptySupplyCurvePointError:
-                        logger.debug(
+                        logger.debug("SC point {} is empty".format(gid))
                    else:
-                        pointsum[
-                        pointsum[
-
+                        pointsum[SupplyCurveField.SC_POINT_GID] = gid
+                        pointsum[SupplyCurveField.SC_ROW_IND] = \
+                            points.loc[gid, 'row_ind']
+                        pointsum[SupplyCurveField.SC_COL_IND] = \
+                            points.loc[gid, 'col_ind']
                        pointsum['res_class'] = ri
 
                        summary.append(pointsum)
-                        logger.debug(
-
-
+                        logger.debug(
+                            "Serial aggregation completed gid {}: "
+                            "{} out of {} points complete".format(
+                                gid, n_finished, len(gids)
+                            )
+                        )
 
                        n_finished += 1
 
        return summary
 
-    def run_parallel(
-
+    def run_parallel(
+        self, gen_fpath, args=None, max_workers=None, sites_per_worker=100
+    ):
        """Get the supply curve points aggregation summary using futures.
 
        Parameters
@@ -1084,25 +1198,31 @@ class SupplyCurveAggregation(BaseAggregation):
        chunks = int(np.ceil(len(self.gids) / sites_per_worker))
        chunks = np.array_split(self.gids, chunks)
 
-        logger.info(
-
-
-
-
+        logger.info(
+            "Running supply curve point aggregation for "
+            "points {} through {} at a resolution of {} "
+            "on {} cores in {} chunks.".format(
+                self.gids[0],
+                self.gids[-1],
+                self._resolution,
+                max_workers,
+                len(chunks),
+            )
+        )
 
        slice_lookup = None
        if self._inclusion_mask is not None:
-            with SupplyCurveExtent(
-
+            with SupplyCurveExtent(
+                self._excl_fpath, resolution=self._resolution
+            ) as sc:
                assert sc.exclusions.shape == self._inclusion_mask.shape
                slice_lookup = sc.get_slice_lookup(self.gids)
 
        futures = []
        summary = []
        n_finished = 0
-        loggers = [__name__,
+        loggers = [__name__, "reV.supply_curve.point_summary", "reV"]
        with SpawnProcessPool(max_workers=max_workers, loggers=loggers) as exe:
-
            # iterate through split executions, submitting each to worker
            for gid_set in chunks:
                # submit executions and append to futures list
@@ -1113,30 +1233,35 @@ class SupplyCurveAggregation(BaseAggregation):
                    rs, cs = slice_lookup[gid]
                    chunk_incl_masks[gid] = self._inclusion_mask[rs, cs]
 
-                futures.append(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                futures.append(
+                    exe.submit(
+                        self.run_serial,
+                        self._excl_fpath,
+                        gen_fpath,
+                        self._tm_dset,
+                        gen_index,
+                        econ_fpath=self._econ_fpath,
+                        excl_dict=self._excl_dict,
+                        inclusion_mask=chunk_incl_masks,
+                        res_class_dset=self._res_class_dset,
+                        res_class_bins=self._res_class_bins,
+                        cf_dset=self._cf_dset,
+                        lcoe_dset=self._lcoe_dset,
+                        h5_dsets=self._h5_dsets,
+                        data_layers=self._data_layers,
+                        resolution=self._resolution,
+                        power_density=self._power_density,
+                        friction_fpath=self._friction_fpath,
+                        friction_dset=self._friction_dset,
+                        area_filter_kernel=self._area_filter_kernel,
+                        min_area=self._min_area,
+                        gids=gid_set,
+                        args=args,
+                        excl_area=self._excl_area,
+                        cap_cost_scale=self._cap_cost_scale,
+                        recalc_lcoe=self._recalc_lcoe,
+                    )
+                )
 
            # gather results
            for future in as_completed(futures):
@@ -1144,12 +1269,17 @@ class SupplyCurveAggregation(BaseAggregation):
                summary += future.result()
                if n_finished % 10 == 0:
                    mem = psutil.virtual_memory()
-                    logger.info(
-
-
-
-
-
+                    logger.info(
+                        "Parallel aggregation futures collected: "
+                        "{} out of {}. Memory usage is {:.3f} GB out "
+                        "of {:.3f} GB ({:.2f}% utilized).".format(
+                            n_finished,
+                            len(chunks),
+                            mem.used / 1e9,
+                            mem.total / 1e9,
+                            100 * mem.used / mem.total,
+                        )
+                    )
 
        return summary
 
@@ -1177,17 +1307,18 @@ class SupplyCurveAggregation(BaseAggregation):
        if all(type_check):
            return bins
 
-
-        raise TypeError(
-
+        if any(type_check):
+            raise TypeError(
+                "Resource class bins has inconsistent "
+                "entry type: {}".format(bins)
+            )
 
-
-
-
-
-            bbins.append([b, bins[i + 1]])
+        bbins = []
+        for i, b in enumerate(sorted(bins)):
+            if i < len(bins) - 1:
+                bbins.append([b, bins[i + 1]])
 
-
+        return bbins
 
    @staticmethod
    def _summary_to_df(summary):
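The bin-parsing logic rebuilt above turns a flat, sorted list of bin edges into consecutive [lower, upper] pairs. A standalone re-implementation of just that loop, for illustration (not a call into reV):

```python
# Standalone illustration of the bin-pairing loop shown above:
# a flat list of edges becomes consecutive [lower, upper] ranges.
bins = [0, 5, 10, 20]

bbins = []
for i, b in enumerate(sorted(bins)):
    if i < len(bins) - 1:
        bbins.append([b, bins[i + 1]])

print(bbins)  # [[0, 5], [5, 10], [10, 20]]
```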
@@ -1204,15 +1335,17 @@ class SupplyCurveAggregation(BaseAggregation):
            Summary of the SC points.
        """
        summary = pd.DataFrame(summary)
-        sort_by = [x for x in (
+        sort_by = [x for x in (SupplyCurveField.SC_POINT_GID, 'res_class')
+                   if x in summary]
        summary = summary.sort_values(sort_by)
        summary = summary.reset_index(drop=True)
-        summary.index.name =
+        summary.index.name = SupplyCurveField.SC_GID
 
        return summary
 
-    def summarize(
-
+    def summarize(
+        self, gen_fpath, args=None, max_workers=None, sites_per_worker=100
+    ):
        """
        Get the supply curve points aggregation summary
 
@@ -1240,35 +1373,45 @@ class SupplyCurveAggregation(BaseAggregation):
        if max_workers == 1:
            gen_index = self._parse_gen_index(gen_fpath)
            afk = self._area_filter_kernel
-            summary = self.run_serial(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            summary = self.run_serial(
+                self._excl_fpath,
+                gen_fpath,
+                self._tm_dset,
+                gen_index,
+                econ_fpath=self._econ_fpath,
+                excl_dict=self._excl_dict,
+                inclusion_mask=self._inclusion_mask,
+                res_class_dset=self._res_class_dset,
+                res_class_bins=self._res_class_bins,
+                cf_dset=self._cf_dset,
+                lcoe_dset=self._lcoe_dset,
+                h5_dsets=self._h5_dsets,
+                data_layers=self._data_layers,
+                resolution=self._resolution,
+                power_density=self._power_density,
+                friction_fpath=self._friction_fpath,
+                friction_dset=self._friction_dset,
+                area_filter_kernel=afk,
+                min_area=self._min_area,
+                gids=self.gids,
+                args=args,
+                excl_area=self._excl_area,
+                cap_cost_scale=self._cap_cost_scale,
+                recalc_lcoe=self._recalc_lcoe,
+            )
        else:
-            summary = self.run_parallel(
-
-
+            summary = self.run_parallel(
+                gen_fpath=gen_fpath,
+                args=args,
+                max_workers=max_workers,
+                sites_per_worker=sites_per_worker,
+            )
 
        if not any(summary):
-            e = (
-
+            e = (
+                "Supply curve aggregation found no non-excluded SC points. "
+                "Please check your exclusions or subset SC GID selection."
+            )
            logger.error(e)
            raise EmptySupplyCurvePointError(e)
 
@@ -1276,8 +1419,14 @@ class SupplyCurveAggregation(BaseAggregation):
 
        return summary
 
-    def run(
-
+    def run(
+        self,
+        out_fpath,
+        gen_fpath=None,
+        args=None,
+        max_workers=None,
+        sites_per_worker=100,
+    ):
        """Run a supply curve aggregation.
 
        Parameters
@@ -1316,7 +1465,9 @@ class SupplyCurveAggregation(BaseAggregation):
 
        if gen_fpath is None:
            out = Aggregation.run(
-                self._excl_fpath,
+                self._excl_fpath,
+                self._res_fpath,
+                self._tm_dset,
                excl_dict=self._excl_dict,
                resolution=self._resolution,
                excl_area=self._excl_area,
@@ -1324,12 +1475,16 @@ class SupplyCurveAggregation(BaseAggregation):
                min_area=self._min_area,
                pre_extract_inclusions=self._pre_extract_inclusions,
                max_workers=max_workers,
-                sites_per_worker=sites_per_worker
-
+                sites_per_worker=sites_per_worker,
+            )
+            summary = out["meta"]
        else:
-            summary = self.summarize(
-
-
+            summary = self.summarize(
+                gen_fpath=gen_fpath,
+                args=args,
+                max_workers=max_workers,
+                sites_per_worker=sites_per_worker,
+            )
 
        out_fpath = _format_sc_agg_out_fpath(out_fpath)
        summary.to_csv(out_fpath)
@@ -1340,9 +1495,24 @@ class SupplyCurveAggregation(BaseAggregation):
def _format_sc_agg_out_fpath(out_fpath):
    """Add CSV file ending and replace underscore, if necessary."""
    if not out_fpath.endswith(".csv"):
-        out_fpath =
+        out_fpath = "{}.csv".format(out_fpath)
 
    project_dir, out_fn = os.path.split(out_fpath)
-    out_fn = out_fn.replace(
-
+    out_fn = out_fn.replace(
+        "supply_curve_aggregation", "supply-curve-aggregation"
+    )
    return os.path.join(project_dir, out_fn)
+
+
+def _warn_about_large_datasets(gen, dset):
+    """Warn user about multi-dimensional datasets in passthrough datasets"""
+    dset_shape = gen.shapes.get(dset, (1,))
+    if len(dset_shape) > 1:
+        msg = (
+            "Generation dataset {!r} is not 1-dimensional (shape: {})."
+            "You may run into memory errors during aggregation - use "
+            "rep-profiles for aggregating higher-order datasets instead!"
+            .format(dset, dset_shape)
+        )
+        logger.warning(msg)
+        warn(msg, UserWarning)