NREL-reV 0.8.7__py3-none-any.whl → 0.8.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/METADATA +12 -10
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/RECORD +38 -38
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/WHEEL +1 -1
- reV/SAM/SAM.py +182 -133
- reV/SAM/econ.py +18 -14
- reV/SAM/generation.py +608 -419
- reV/SAM/windbos.py +93 -79
- reV/bespoke/bespoke.py +690 -445
- reV/bespoke/place_turbines.py +6 -6
- reV/config/project_points.py +220 -140
- reV/econ/econ.py +165 -113
- reV/econ/economies_of_scale.py +57 -34
- reV/generation/base.py +310 -183
- reV/generation/generation.py +298 -190
- reV/handlers/exclusions.py +16 -15
- reV/handlers/multi_year.py +12 -9
- reV/handlers/outputs.py +6 -5
- reV/hybrids/hybrid_methods.py +28 -30
- reV/hybrids/hybrids.py +304 -188
- reV/nrwal/nrwal.py +262 -168
- reV/qa_qc/cli_qa_qc.py +14 -10
- reV/qa_qc/qa_qc.py +217 -119
- reV/qa_qc/summary.py +228 -146
- reV/rep_profiles/rep_profiles.py +349 -230
- reV/supply_curve/aggregation.py +349 -188
- reV/supply_curve/competitive_wind_farms.py +90 -48
- reV/supply_curve/exclusions.py +138 -85
- reV/supply_curve/extent.py +75 -50
- reV/supply_curve/points.py +536 -309
- reV/supply_curve/sc_aggregation.py +366 -225
- reV/supply_curve/supply_curve.py +505 -308
- reV/supply_curve/tech_mapping.py +144 -82
- reV/utilities/__init__.py +199 -16
- reV/utilities/pytest_utils.py +8 -4
- reV/version.py +1 -1
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/LICENSE +0 -0
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/entry_points.txt +0 -0
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/top_level.txt +0 -0
reV/supply_curve/sc_aggregation.py

@@ -6,29 +6,35 @@ Created on Fri Jun 21 13:24:31 2019
 
 @author: gbuster
 """
-from concurrent.futures import as_completed
 import logging
-import numpy as np
-import psutil
 import os
-
+from concurrent.futures import as_completed
 from warnings import warn
 
+import numpy as np
+import pandas as pd
+import psutil
+from rex.multi_file_resource import MultiFileResource
+from rex.resource import Resource
+from rex.utilities.execution import SpawnProcessPool
+
 from reV.generation.base import BaseGen
 from reV.handlers.exclusions import ExclusionLayers
-from reV.supply_curve.aggregation import (
-
+from reV.supply_curve.aggregation import (
+    AbstractAggFileHandler,
+    Aggregation,
+    BaseAggregation,
+)
 from reV.supply_curve.exclusions import FrictionMask
 from reV.supply_curve.extent import SupplyCurveExtent
 from reV.supply_curve.points import GenerationSupplyCurvePoint
-from reV.utilities
-
-
-
-
-
-
-from rex.utilities.execution import SpawnProcessPool
+from reV.utilities import SupplyCurveField, log_versions
+from reV.utilities.exceptions import (
+    EmptySupplyCurvePointError,
+    FileInputError,
+    InputWarning,
+    OutputWarning,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -43,10 +49,19 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
     - variable power density .csv (optional)
     """
 
-    def __init__(
-
-
-
+    def __init__(
+        self,
+        excl_fpath,
+        gen_fpath,
+        econ_fpath=None,
+        data_layers=None,
+        power_density=None,
+        excl_dict=None,
+        friction_fpath=None,
+        friction_dset=None,
+        area_filter_kernel="queen",
+        min_area=None,
+    ):
         """
         Parameters
         ----------
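For orientation, a minimal usage sketch of the reformatted constructor above. The file paths are placeholders; the handler is opened as a context manager, exactly as `run_serial` does further down in this diff, and only the two positional files are required.

```python
from reV.supply_curve.sc_aggregation import SupplyCurveAggFileHandler

# Placeholder paths for illustration only.
EXCL_FPATH = "./exclusions.h5"
GEN_FPATH = "./gen_2012.h5"

# Every other argument shown in the new signature is optional.
with SupplyCurveAggFileHandler(EXCL_FPATH, GEN_FPATH) as fh:
    gen = fh.gen  # generation (and optionally econ) resource handler
```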
@@ -89,9 +104,12 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
         min_area : float | None
             Minimum required contiguous area filter in sq-km
         """
-        super().__init__(
-
-
+        super().__init__(
+            excl_fpath,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+        )
 
         self._gen = self._open_gen_econ_resource(gen_fpath, econ_fpath)
         # pre-initialize the resource meta data
@@ -106,7 +124,7 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
            self._friction_layer = FrictionMask(friction_fpath, friction_dset)
 
            if not np.all(self._friction_layer.shape == self._excl.shape):
-                e = (
+                e = ("Friction layer shape {} must match exclusions shape {}!"
                     .format(self._friction_layer.shape, self._excl.shape))
                logger.error(e)
                raise FileInputError(e)
@@ -132,14 +150,15 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
         """
 
         handler = None
-        is_gen_h5 = isinstance(gen_fpath, str) and gen_fpath.endswith(
-        is_econ_h5 = isinstance(econ_fpath, str) and econ_fpath.endswith(
+        is_gen_h5 = isinstance(gen_fpath, str) and gen_fpath.endswith(".h5")
+        is_econ_h5 = isinstance(econ_fpath, str) and econ_fpath.endswith(".h5")
 
         if is_gen_h5 and not is_econ_h5:
             handler = Resource(gen_fpath)
         elif is_gen_h5 and is_econ_h5:
-            handler = MultiFileResource(
-
+            handler = MultiFileResource(
+                [gen_fpath, econ_fpath], check_files=True
+            )
 
         return handler
 
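The handler selection above boils down to: a single generation .h5 opens with rex's Resource, while a generation/econ pair goes through MultiFileResource. A self-contained sketch of that same dispatch (the helper name here is ours, not reV's):

```python
from rex.multi_file_resource import MultiFileResource
from rex.resource import Resource


def open_gen_econ(gen_fpath, econ_fpath=None):
    """Mirror the handler-selection logic in the hunk above."""
    handler = None
    is_gen_h5 = isinstance(gen_fpath, str) and gen_fpath.endswith(".h5")
    is_econ_h5 = isinstance(econ_fpath, str) and econ_fpath.endswith(".h5")

    if is_gen_h5 and not is_econ_h5:
        handler = Resource(gen_fpath)
    elif is_gen_h5 and is_econ_h5:
        # check_files=True asks rex to verify the two files are consistent
        handler = MultiFileResource([gen_fpath, econ_fpath], check_files=True)

    return handler
```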
@@ -149,20 +168,24 @@ class SupplyCurveAggFileHandler(AbstractAggFileHandler):
         if isinstance(self._power_density, str):
             self._pdf = self._power_density
 
-            if self._pdf.endswith(
+            if self._pdf.endswith(".csv"):
                 self._power_density = pd.read_csv(self._pdf)
-                if (
+                if (SupplyCurveField.GID in self._power_density
                         and 'power_density' in self._power_density):
-                    self._power_density =
+                    self._power_density = \
+                        self._power_density.set_index(SupplyCurveField.GID)
                 else:
-                    msg = ('Variable power density file must include "
+                    msg = ('Variable power density file must include "{}" '
                            'and "power_density" columns, but received: {}'
-                           .format(
+                           .format(SupplyCurveField.GID,
+                                   self._power_density.columns.values))
                     logger.error(msg)
                     raise FileInputError(msg)
             else:
-                msg = (
-
+                msg = (
+                    "Variable power density file must be csv but received: "
+                    "{}".format(self._pdf)
+                )
                 logger.error(msg)
                 raise FileInputError(msg)
 
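The branch above expects a CSV keyed by the supply-curve GID column (SupplyCurveField.GID) plus a 'power_density' column; anything else raises FileInputError. A minimal sketch of building and indexing such a table, assuming the GID column is literally named "gid":

```python
import pandas as pd

# Illustrative variable power density table: one row per gid.
pdf = pd.DataFrame(
    {
        "gid": [0, 1, 2],
        "power_density": [36.0, 36.0, 20.0],  # MW/km2 in typical reV usage
    }
)
pdf.to_csv("variable_power_density.csv", index=False)

# Mirrors the parsing above: read the CSV, then index it by gid so the
# aggregation can look up one power density value per point.
power_density = pd.read_csv("variable_power_density.csv")
power_density = power_density.set_index("gid")
print(power_density.loc[2, "power_density"])  # -> 20.0
```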
@@ -231,7 +254,7 @@ class SupplyCurveAggregation(BaseAggregation):
                  lcoe_dset='lcoe_fcr-means', h5_dsets=None, data_layers=None,
                  power_density=None, friction_fpath=None, friction_dset=None,
                  cap_cost_scale=None, recalc_lcoe=True):
-        """
+        r"""ReV supply curve points aggregation framework.
 
         ``reV`` supply curve aggregation combines a high-resolution
         (e.g. 90m) exclusion dataset with a (typically) lower resolution
@@ -327,6 +350,13 @@ class SupplyCurveAggregation(BaseAggregation):
                     "more_developable_land": {
                         "force_include_range": [5, 10]
                     },
+                    "viewsheds": {
+                        "exclude_values": 1,
+                        "extent": {
+                            "layer": "federal_parks",
+                            "include_range": [1, 5]
+                        }
+                    }
                     ...
                 }
 
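The "extent" sub-key added to the docstring example above restricts where an exclusion is applied by pointing at another layer's inclusion criteria. A sketch of the equivalent Python input (layer names are taken from the docstring example and are purely illustrative):

```python
# Exclusion dictionary mirroring the docstring example above.
excl_dict = {
    "viewsheds": {
        "exclude_values": 1,
        # Only apply this exclusion where "federal_parks" falls in 1-5.
        "extent": {
            "layer": "federal_parks",
            "include_range": [1, 5],
        },
    },
    "more_developable_land": {
        "force_include_range": [5, 10],
    },
}
```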
@@ -650,15 +680,22 @@ class SupplyCurveAggregation(BaseAggregation):
             associated with all pixels with that unique value.
         """
         log_versions(logger)
-        logger.info(
-        logger.debug(
-        logger.debug(
-
-        super().__init__(
-
-
-
-
+        logger.info("Initializing SupplyCurveAggregation...")
+        logger.debug("Exclusion filepath: {}".format(excl_fpath))
+        logger.debug("Exclusion dict: {}".format(excl_dict))
+
+        super().__init__(
+            excl_fpath,
+            tm_dset,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+            resolution=resolution,
+            excl_area=excl_area,
+            res_fpath=res_fpath,
+            gids=gids,
+            pre_extract_inclusions=pre_extract_inclusions,
+        )
 
         self._econ_fpath = econ_fpath
         self._res_class_dset = res_class_dset
@@ -673,7 +710,7 @@ class SupplyCurveAggregation(BaseAggregation):
         self._data_layers = data_layers
         self._recalc_lcoe = recalc_lcoe
 
-        logger.debug(
+        logger.debug("Resource class bins: {}".format(self._res_class_bins))
 
         if self._cap_cost_scale is not None:
             if self._h5_dsets is None:
@@ -683,16 +720,20 @@ class SupplyCurveAggregation(BaseAggregation):
                 self._h5_dsets = list(set(self._h5_dsets))
 
         if self._power_density is None:
-            msg = (
-
-
+            msg = (
+                "Supply curve aggregation power density not specified. "
+                "Will try to infer based on lookup table: {}".format(
+                    GenerationSupplyCurvePoint.POWER_DENSITY
+                )
+            )
             logger.warning(msg)
             warn(msg, InputWarning)
 
         self._check_data_layers()
 
-    def _check_data_layers(
-
+    def _check_data_layers(
+        self, methods=("mean", "max", "min", "mode", "sum", "category")
+    ):
         """Run pre-flight checks on requested aggregation data layers.
 
         Parameters
@@ -702,40 +743,49 @@ class SupplyCurveAggregation(BaseAggregation):
         """
 
         if self._data_layers is not None:
-            logger.debug(
+            logger.debug("Checking data layers...")
 
             with ExclusionLayers(self._excl_fpath) as f:
                 shape_base = f.shape
 
             for k, v in self._data_layers.items():
-                if
-                    raise KeyError(
-
-
-
-
-
-
-
-
-                if
-
+                if "dset" not in v:
+                    raise KeyError(
+                        'Data aggregation "dset" data layer "{}" '
+                        "must be specified.".format(k)
+                    )
+                if "method" not in v:
+                    raise KeyError(
+                        'Data aggregation "method" data layer "{}" '
+                        "must be specified.".format(k)
+                    )
+                if v["method"].lower() not in methods:
+                    raise ValueError(
+                        "Cannot recognize data layer agg method: "
+                        '"{}". Can only do: {}.'.format(v["method"], methods)
+                    )
+                if "fpath" in v:
+                    with ExclusionLayers(v["fpath"]) as f:
                         try:
                             mismatched_shapes = any(f.shape != shape_base)
                         except TypeError:
                             mismatched_shapes = f.shape != shape_base
                         if mismatched_shapes:
-                            msg = (
-
-
-
+                            msg = (
+                                'Data shape of data layer "{}" is {}, '
+                                "which does not match the baseline "
+                                "exclusions shape {}.".format(
+                                    k, f.shape, shape_base
+                                )
+                            )
                             raise FileInputError(msg)
 
-            logger.debug(
+            logger.debug("Finished checking data layers.")
 
     @staticmethod
-    def _get_res_gen_lcoe_data(
-
+    def _get_res_gen_lcoe_data(
+        gen, res_class_dset, res_class_bins, cf_dset, lcoe_dset
+    ):
         """Extract the basic resource / generation / lcoe data to be used in
         the aggregation process.
 
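The pre-flight checks reconstructed above require each data layer to declare a "dset" and a "method" drawn from mean/max/min/mode/sum/category, with an optional "fpath" whose layer shape must match the exclusions file. A sketch of a request that passes those checks (all dataset and file names are placeholders):

```python
# Hypothetical data_layers request for SupplyCurveAggregation(data_layers=...).
data_layers = {
    "slope": {"dset": "srtm_slope", "method": "mean"},
    "land_class": {"dset": "nlcd_2019", "method": "category"},
    "county": {
        "dset": "cnty_fips",
        "method": "mode",
        # Optional: read this layer from a separate .h5; its shape must match
        # the baseline exclusions shape or a FileInputError is raised.
        "fpath": "./other_layers.h5",
    },
}
```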
@@ -769,7 +819,7 @@ class SupplyCurveAggregation(BaseAggregation):
 
         dset_list = (res_class_dset, cf_dset, lcoe_dset)
         gen_dsets = [] if gen is None else gen.datasets
-        labels = (
+        labels = ("res_class_dset", "cf_dset", "lcoe_dset")
         temp = [None, None, None]
 
         if isinstance(gen, Resource):
@@ -777,8 +827,9 @@ class SupplyCurveAggregation(BaseAggregation):
         elif isinstance(gen, MultiFileResource):
             source_fps = gen._h5_files
         else:
-            msg =
-
+            msg = 'Did not recognize gen object input of type "{}": {}'.format(
+                type(gen), gen
+            )
             logger.error(msg)
             raise TypeError(msg)
 
@@ -787,9 +838,12 @@ class SupplyCurveAggregation(BaseAggregation):
                 _warn_about_large_datasets(gen, dset)
                 temp[i] = gen[dset]
             elif dset not in gen_dsets and dset is not None:
-                w = (
-
-
+                w = (
+                    'Could not find "{}" input as "{}" in source files: {}. '
+                    "Available datasets: {}".format(
+                        labels[i], dset, source_fps, gen_dsets
+                    )
+                )
                 logger.warning(w)
                 warn(w, OutputWarning)
 
@@ -825,8 +879,10 @@ class SupplyCurveAggregation(BaseAggregation):
         # look for the datasets required by the LCOE re-calculation and make
         # lists of the missing datasets
         gen_dsets = [] if gen is None else gen.datasets
-        lcoe_recalc_req = ('fixed_charge_rate',
-                           '
+        lcoe_recalc_req = ('fixed_charge_rate',
+                           'capital_cost',
+                           'fixed_operating_cost',
+                           'variable_operating_cost',
                            'system_capacity')
         missing_lcoe_source = [k for k in lcoe_recalc_req
                                if k not in gen_dsets]
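These five datasets are the inputs to a fixed-charge-rate LCOE recalculation from the multi-year mean capacity factor. As a rough, generic sketch of that relationship (the units here are an assumption for the example; this is not a verbatim copy of reV's internal routine):

```python
def lcoe_fcr(fixed_charge_rate, capital_cost, fixed_operating_cost,
             aep, variable_operating_cost):
    """Generic fixed-charge-rate LCOE: (FCR * CC + FOC) / AEP + VOC."""
    return ((fixed_charge_rate * capital_cost + fixed_operating_cost) / aep
            + variable_operating_cost)


# Example: 100 MW plant at a 35% multi-year mean capacity factor, with AEP in
# MWh/year, costs in $, and variable OPEX in $/MWh -> LCOE in $/MWh.
aep_mwh = 100 * 0.35 * 8760
print(round(lcoe_fcr(0.07, 120e6, 3e6, aep_mwh, 0.0), 2))  # ~37.18
```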
@@ -837,61 +893,91 @@ class SupplyCurveAggregation(BaseAggregation):
         elif isinstance(gen, MultiFileResource):
             source_fps = gen._h5_files
         else:
-            msg =
-
+            msg = 'Did not recognize gen object input of type "{}": {}'.format(
+                type(gen), gen
+            )
             logger.error(msg)
             raise TypeError(msg)
 
         h5_dsets_data = None
         if h5_dsets is not None:
-            missing_lcoe_request = [
-
+            missing_lcoe_request = [
+                k for k in lcoe_recalc_req if k not in h5_dsets
+            ]
 
             if not isinstance(h5_dsets, (list, tuple)):
-                e = (
-
+                e = (
+                    "Additional h5_dsets argument must be a list or tuple "
+                    "but received: {} {}".format(type(h5_dsets), h5_dsets)
+                )
                 logger.error(e)
                 raise TypeError(e)
 
             missing_h5_dsets = [k for k in h5_dsets if k not in gen_dsets]
             if any(missing_h5_dsets):
-                msg = (
-
-
+                msg = (
+                    'Could not find requested h5_dsets "{}" in '
+                    "source files: {}. Available datasets: {}".format(
+                        missing_h5_dsets, source_fps, gen_dsets
+                    )
+                )
                 logger.error(msg)
                 raise FileInputError(msg)
 
             h5_dsets_data = {dset: gen[dset] for dset in h5_dsets}
 
             if any(missing_lcoe_source):
-                msg = (
-
-
-
-
-
+                msg = (
+                    "Could not find the datasets in the gen source file that "
+                    "are required to re-calculate the multi-year LCOE. If you "
+                    "are running a multi-year job, it is strongly suggested "
+                    "you pass through these datasets to re-calculate the LCOE "
+                    "from the multi-year mean CF: {}".format(missing_lcoe_source)
+                )
                 logger.warning(msg)
                 warn(msg, InputWarning)
 
             if any(missing_lcoe_request):
-                msg = (
-
-
-
+                msg = (
+                    "It is strongly advised that you include the following "
+                    "datasets in the h5_dsets request in order to re-calculate "
+                    "the LCOE from the multi-year mean CF and AEP: {}".format(
+                        missing_lcoe_request
+                    )
+                )
                 logger.warning(msg)
                 warn(msg, InputWarning)
 
         return h5_dsets_data
 
     @classmethod
-    def run_serial(
-
-
-
-
-
-
-
+    def run_serial(
+        cls,
+        excl_fpath,
+        gen_fpath,
+        tm_dset,
+        gen_index,
+        econ_fpath=None,
+        excl_dict=None,
+        inclusion_mask=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        gids=None,
+        args=None,
+        res_class_dset=None,
+        res_class_bins=None,
+        cf_dset="cf_mean-means",
+        lcoe_dset="lcoe_fcr-means",
+        h5_dsets=None,
+        data_layers=None,
+        power_density=None,
+        friction_fpath=None,
+        friction_dset=None,
+        excl_area=None,
+        cap_cost_scale=None,
+        recalc_lcoe=True,
+    ):
         """Standalone method to create agg summary - can be parallelized.
 
         Parameters
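In practice, the two warnings above mean that multi-year jobs should pass the LCOE source datasets through explicitly. A sketch of an h5_dsets request that keeps both checks quiet:

```python
# The five datasets named in lcoe_recalc_req above, passed through so the
# LCOE can be re-computed from the multi-year mean capacity factor.
lcoe_recalc_req = ('fixed_charge_rate', 'capital_cost', 'fixed_operating_cost',
                   'variable_operating_cost', 'system_capacity')
h5_dsets = list(lcoe_recalc_req)

# Neither warning fires: nothing from the requirement list is missing.
assert not [k for k in lcoe_recalc_req if k not in h5_dsets]
```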
@@ -1002,34 +1088,38 @@ class SupplyCurveAggregation(BaseAggregation):
 
             slice_lookup = sc.get_slice_lookup(gids)
 
-        logger.debug(
-
+        logger.debug(
+            "Starting SupplyCurveAggregation serial with "
+            "supply curve {} gids".format(len(gids))
+        )
 
         cls._check_inclusion_mask(inclusion_mask, gids, exclusion_shape)
 
         # pre-extract handlers so they are not repeatedly initialized
-        file_kwargs = {
-
-
-
-
-
-
-
-
-
-
-
-
-
+        file_kwargs = {
+            "econ_fpath": econ_fpath,
+            "data_layers": data_layers,
+            "power_density": power_density,
+            "excl_dict": excl_dict,
+            "area_filter_kernel": area_filter_kernel,
+            "min_area": min_area,
+            "friction_fpath": friction_fpath,
+            "friction_dset": friction_dset,
+        }
+        with SupplyCurveAggFileHandler(
+            excl_fpath, gen_fpath, **file_kwargs
+        ) as fh:
+            temp = cls._get_res_gen_lcoe_data(
+                fh.gen, res_class_dset, res_class_bins, cf_dset, lcoe_dset
+            )
             res_data, res_class_bins, cf_data, lcoe_data = temp
             h5_dsets_data = cls._get_extra_dsets(fh.gen, h5_dsets)
 
             n_finished = 0
             for gid in gids:
                 gid_inclusions = cls._get_gid_inclusion_mask(
-                    inclusion_mask, gid, slice_lookup,
-
+                    inclusion_mask, gid, slice_lookup, resolution=resolution
+                )
 
                 for ri, res_bin in enumerate(res_class_bins):
                     try:
@@ -1055,27 +1145,34 @@ class SupplyCurveAggregation(BaseAggregation):
                             close=False,
                             friction_layer=fh.friction_layer,
                             cap_cost_scale=cap_cost_scale,
-                            recalc_lcoe=recalc_lcoe
+                            recalc_lcoe=recalc_lcoe,
+                        )
 
                     except EmptySupplyCurvePointError:
-                        logger.debug(
+                        logger.debug("SC point {} is empty".format(gid))
                     else:
-                        pointsum[
-                        pointsum[
-
+                        pointsum[SupplyCurveField.SC_POINT_GID] = gid
+                        pointsum[SupplyCurveField.SC_ROW_IND] = \
+                            points.loc[gid, 'row_ind']
+                        pointsum[SupplyCurveField.SC_COL_IND] = \
+                            points.loc[gid, 'col_ind']
                         pointsum['res_class'] = ri
 
                         summary.append(pointsum)
-                        logger.debug(
-
-
+                        logger.debug(
+                            "Serial aggregation completed gid {}: "
+                            "{} out of {} points complete".format(
+                                gid, n_finished, len(gids)
+                            )
+                        )
 
                 n_finished += 1
 
         return summary
 
-    def run_parallel(
-
+    def run_parallel(
+        self, gen_fpath, args=None, max_workers=None, sites_per_worker=100
+    ):
         """Get the supply curve points aggregation summary using futures.
 
         Parameters
|
|
1101
1198
|
chunks = int(np.ceil(len(self.gids) / sites_per_worker))
|
1102
1199
|
chunks = np.array_split(self.gids, chunks)
|
1103
1200
|
|
1104
|
-
logger.info(
|
1105
|
-
|
1106
|
-
|
1107
|
-
|
1108
|
-
|
1201
|
+
logger.info(
|
1202
|
+
"Running supply curve point aggregation for "
|
1203
|
+
"points {} through {} at a resolution of {} "
|
1204
|
+
"on {} cores in {} chunks.".format(
|
1205
|
+
self.gids[0],
|
1206
|
+
self.gids[-1],
|
1207
|
+
self._resolution,
|
1208
|
+
max_workers,
|
1209
|
+
len(chunks),
|
1210
|
+
)
|
1211
|
+
)
|
1109
1212
|
|
1110
1213
|
slice_lookup = None
|
1111
1214
|
if self._inclusion_mask is not None:
|
1112
|
-
with SupplyCurveExtent(
|
1113
|
-
|
1215
|
+
with SupplyCurveExtent(
|
1216
|
+
self._excl_fpath, resolution=self._resolution
|
1217
|
+
) as sc:
|
1114
1218
|
assert sc.exclusions.shape == self._inclusion_mask.shape
|
1115
1219
|
slice_lookup = sc.get_slice_lookup(self.gids)
|
1116
1220
|
|
1117
1221
|
futures = []
|
1118
1222
|
summary = []
|
1119
1223
|
n_finished = 0
|
1120
|
-
loggers = [__name__,
|
1224
|
+
loggers = [__name__, "reV.supply_curve.point_summary", "reV"]
|
1121
1225
|
with SpawnProcessPool(max_workers=max_workers, loggers=loggers) as exe:
|
1122
|
-
|
1123
1226
|
# iterate through split executions, submitting each to worker
|
1124
1227
|
for gid_set in chunks:
|
1125
1228
|
# submit executions and append to futures list
|
@@ -1130,30 +1233,35 @@ class SupplyCurveAggregation(BaseAggregation):
                         rs, cs = slice_lookup[gid]
                         chunk_incl_masks[gid] = self._inclusion_mask[rs, cs]
 
-                futures.append(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                futures.append(
+                    exe.submit(
+                        self.run_serial,
+                        self._excl_fpath,
+                        gen_fpath,
+                        self._tm_dset,
+                        gen_index,
+                        econ_fpath=self._econ_fpath,
+                        excl_dict=self._excl_dict,
+                        inclusion_mask=chunk_incl_masks,
+                        res_class_dset=self._res_class_dset,
+                        res_class_bins=self._res_class_bins,
+                        cf_dset=self._cf_dset,
+                        lcoe_dset=self._lcoe_dset,
+                        h5_dsets=self._h5_dsets,
+                        data_layers=self._data_layers,
+                        resolution=self._resolution,
+                        power_density=self._power_density,
+                        friction_fpath=self._friction_fpath,
+                        friction_dset=self._friction_dset,
+                        area_filter_kernel=self._area_filter_kernel,
+                        min_area=self._min_area,
+                        gids=gid_set,
+                        args=args,
+                        excl_area=self._excl_area,
+                        cap_cost_scale=self._cap_cost_scale,
+                        recalc_lcoe=self._recalc_lcoe,
+                    )
+                )
 
             # gather results
             for future in as_completed(futures):
@@ -1161,12 +1269,17 @@ class SupplyCurveAggregation(BaseAggregation):
                 summary += future.result()
                 if n_finished % 10 == 0:
                     mem = psutil.virtual_memory()
-                    logger.info(
-
-
-
-
-
+                    logger.info(
+                        "Parallel aggregation futures collected: "
+                        "{} out of {}. Memory usage is {:.3f} GB out "
+                        "of {:.3f} GB ({:.2f}% utilized).".format(
+                            n_finished,
+                            len(chunks),
+                            mem.used / 1e9,
+                            mem.total / 1e9,
+                            100 * mem.used / mem.total,
+                        )
+                    )
 
         return summary
 
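The parallel path reconstructed above chunks the supply-curve GIDs, submits one run_serial call per chunk to rex's SpawnProcessPool, and stitches the per-chunk summaries together as the futures complete. A standalone sketch of the same pattern using the standard-library executor (SpawnProcessPool is used like a concurrent.futures executor here, so the shape of the code carries over):

```python
from concurrent.futures import ProcessPoolExecutor, as_completed

import numpy as np


def run_serial(gids):
    """Stand-in for SupplyCurveAggregation.run_serial: one dict per gid."""
    return [{"sc_point_gid": int(gid)} for gid in gids]


def run_parallel(gids, max_workers=2, sites_per_worker=100):
    # Same chunking arithmetic as the hunk above.
    n_chunks = int(np.ceil(len(gids) / sites_per_worker))
    chunks = np.array_split(gids, n_chunks)

    summary = []
    with ProcessPoolExecutor(max_workers=max_workers) as exe:
        futures = [exe.submit(run_serial, chunk) for chunk in chunks]
        for future in as_completed(futures):  # gather results
            summary += future.result()

    return summary


if __name__ == "__main__":
    print(len(run_parallel(np.arange(250))))  # -> 250
```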
@@ -1194,17 +1307,18 @@ class SupplyCurveAggregation(BaseAggregation):
         if all(type_check):
             return bins
 
-
-            raise TypeError(
-
+        if any(type_check):
+            raise TypeError(
+                "Resource class bins has inconsistent "
+                "entry type: {}".format(bins)
+            )
 
-
-
-
-
-                bbins.append([b, bins[i + 1]])
+        bbins = []
+        for i, b in enumerate(sorted(bins)):
+            if i < len(bins) - 1:
+                bbins.append([b, bins[i + 1]])
 
-
+        return bbins
 
     @staticmethod
     def _summary_to_df(summary):
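The block reconstructed above turns a sorted list of scalar bin edges into consecutive [low, high] resource-class bins. For example:

```python
def to_pairwise_bins(bins):
    """Mirror the edge-to-bin loop in the hunk above."""
    bbins = []
    for i, b in enumerate(sorted(bins)):
        if i < len(bins) - 1:
            bbins.append([b, bins[i + 1]])
    return bbins


print(to_pairwise_bins([4, 5, 6, 7]))  # -> [[4, 5], [5, 6], [6, 7]]
```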
@@ -1221,15 +1335,17 @@ class SupplyCurveAggregation(BaseAggregation):
             Summary of the SC points.
         """
         summary = pd.DataFrame(summary)
-        sort_by = [x for x in (
+        sort_by = [x for x in (SupplyCurveField.SC_POINT_GID, 'res_class')
+                   if x in summary]
         summary = summary.sort_values(sort_by)
         summary = summary.reset_index(drop=True)
-        summary.index.name =
+        summary.index.name = SupplyCurveField.SC_GID
 
         return summary
 
-    def summarize(
-
+    def summarize(
+        self, gen_fpath, args=None, max_workers=None, sites_per_worker=100
+    ):
         """
         Get the supply curve points aggregation summary
 
@@ -1257,35 +1373,45 @@ class SupplyCurveAggregation(BaseAggregation):
         if max_workers == 1:
             gen_index = self._parse_gen_index(gen_fpath)
             afk = self._area_filter_kernel
-            summary = self.run_serial(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            summary = self.run_serial(
+                self._excl_fpath,
+                gen_fpath,
+                self._tm_dset,
+                gen_index,
+                econ_fpath=self._econ_fpath,
+                excl_dict=self._excl_dict,
+                inclusion_mask=self._inclusion_mask,
+                res_class_dset=self._res_class_dset,
+                res_class_bins=self._res_class_bins,
+                cf_dset=self._cf_dset,
+                lcoe_dset=self._lcoe_dset,
+                h5_dsets=self._h5_dsets,
+                data_layers=self._data_layers,
+                resolution=self._resolution,
+                power_density=self._power_density,
+                friction_fpath=self._friction_fpath,
+                friction_dset=self._friction_dset,
+                area_filter_kernel=afk,
+                min_area=self._min_area,
+                gids=self.gids,
+                args=args,
+                excl_area=self._excl_area,
+                cap_cost_scale=self._cap_cost_scale,
+                recalc_lcoe=self._recalc_lcoe,
+            )
         else:
-            summary = self.run_parallel(
-
-
+            summary = self.run_parallel(
+                gen_fpath=gen_fpath,
+                args=args,
+                max_workers=max_workers,
+                sites_per_worker=sites_per_worker,
+            )
 
         if not any(summary):
-            e = (
-
+            e = (
+                "Supply curve aggregation found no non-excluded SC points. "
+                "Please check your exclusions or subset SC GID selection."
+            )
             logger.error(e)
             raise EmptySupplyCurvePointError(e)
 
@@ -1293,8 +1419,14 @@ class SupplyCurveAggregation(BaseAggregation):
 
         return summary
 
-    def run(
-
+    def run(
+        self,
+        out_fpath,
+        gen_fpath=None,
+        args=None,
+        max_workers=None,
+        sites_per_worker=100,
+    ):
         """Run a supply curve aggregation.
 
         Parameters
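Putting the pieces together, a hedged end-to-end sketch of driving the class from Python. File paths, dataset names, and bin edges are placeholders, and we assume the first two positional arguments of the constructor are the exclusions file and tech-map dataset, as the super().__init__ call earlier in this diff suggests; most workflows reach the same code through the reV CLI.

```python
from reV.supply_curve.sc_aggregation import SupplyCurveAggregation

# Placeholder inputs for illustration only.
EXCL_FPATH = "./exclusions.h5"
GEN_FPATH = "./gen_2012.h5"
TM_DSET = "techmap_wtk"

agg = SupplyCurveAggregation(
    EXCL_FPATH,
    TM_DSET,
    excl_dict={"more_developable_land": {"force_include_range": [5, 10]}},
    res_class_dset="ws_mean-means",   # placeholder resource dataset
    res_class_bins=[0, 6, 8, 10],     # becomes [[0, 6], [6, 8], [8, 10]]
    cf_dset="cf_mean-means",
    lcoe_dset="lcoe_fcr-means",
    resolution=64,
)

# run() normalizes out_fpath (adding ".csv") and writes the summary table there.
agg.run("./sc_agg", gen_fpath=GEN_FPATH, max_workers=None)
```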
@@ -1333,7 +1465,9 @@ class SupplyCurveAggregation(BaseAggregation):
 
         if gen_fpath is None:
             out = Aggregation.run(
-                self._excl_fpath,
+                self._excl_fpath,
+                self._res_fpath,
+                self._tm_dset,
                 excl_dict=self._excl_dict,
                 resolution=self._resolution,
                 excl_area=self._excl_area,
@@ -1341,12 +1475,16 @@ class SupplyCurveAggregation(BaseAggregation):
                 min_area=self._min_area,
                 pre_extract_inclusions=self._pre_extract_inclusions,
                 max_workers=max_workers,
-                sites_per_worker=sites_per_worker
-
+                sites_per_worker=sites_per_worker,
+            )
+            summary = out["meta"]
         else:
-            summary = self.summarize(
-
-
+            summary = self.summarize(
+                gen_fpath=gen_fpath,
+                args=args,
+                max_workers=max_workers,
+                sites_per_worker=sites_per_worker,
+            )
 
         out_fpath = _format_sc_agg_out_fpath(out_fpath)
         summary.to_csv(out_fpath)
@@ -1357,11 +1495,12 @@ class SupplyCurveAggregation(BaseAggregation):
 def _format_sc_agg_out_fpath(out_fpath):
     """Add CSV file ending and replace underscore, if necessary."""
     if not out_fpath.endswith(".csv"):
-        out_fpath =
+        out_fpath = "{}.csv".format(out_fpath)
 
     project_dir, out_fn = os.path.split(out_fpath)
-    out_fn = out_fn.replace(
-
+    out_fn = out_fn.replace(
+        "supply_curve_aggregation", "supply-curve-aggregation"
+    )
     return os.path.join(project_dir, out_fn)
 
 
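A quick illustration of the path normalization above (importing the private helper just for demonstration):

```python
from reV.supply_curve.sc_aggregation import _format_sc_agg_out_fpath

print(_format_sc_agg_out_fpath("./out/my_supply_curve_aggregation"))
# -> ./out/my_supply-curve-aggregation.csv
```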
@@ -1369,9 +1508,11 @@ def _warn_about_large_datasets(gen, dset):
     """Warn user about multi-dimensional datasets in passthrough datasets"""
     dset_shape = gen.shapes.get(dset, (1,))
     if len(dset_shape) > 1:
-        msg = (
-
-
-
+        msg = (
+            "Generation dataset {!r} is not 1-dimensional (shape: {})."
+            "You may run into memory errors during aggregation - use "
+            "rep-profiles for aggregating higher-order datasets instead!"
+            .format(dset, dset_shape)
+        )
         logger.warning(msg)
         warn(msg, UserWarning)