NREL_reV-0.8.7-py3-none-any.whl → NREL_reV-0.9.0-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/METADATA +13 -10
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/RECORD +43 -43
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/WHEEL +1 -1
- reV/SAM/SAM.py +217 -133
- reV/SAM/econ.py +18 -14
- reV/SAM/generation.py +611 -422
- reV/SAM/windbos.py +93 -79
- reV/bespoke/bespoke.py +681 -377
- reV/bespoke/cli_bespoke.py +2 -0
- reV/bespoke/place_turbines.py +187 -43
- reV/config/output_request.py +2 -1
- reV/config/project_points.py +218 -140
- reV/econ/econ.py +166 -114
- reV/econ/economies_of_scale.py +91 -45
- reV/generation/base.py +331 -184
- reV/generation/generation.py +326 -200
- reV/generation/output_attributes/lcoe_fcr_inputs.json +38 -3
- reV/handlers/__init__.py +0 -1
- reV/handlers/exclusions.py +16 -15
- reV/handlers/multi_year.py +57 -26
- reV/handlers/outputs.py +6 -5
- reV/handlers/transmission.py +44 -27
- reV/hybrids/hybrid_methods.py +30 -30
- reV/hybrids/hybrids.py +305 -189
- reV/nrwal/nrwal.py +262 -168
- reV/qa_qc/cli_qa_qc.py +14 -10
- reV/qa_qc/qa_qc.py +217 -119
- reV/qa_qc/summary.py +228 -146
- reV/rep_profiles/rep_profiles.py +349 -230
- reV/supply_curve/aggregation.py +349 -188
- reV/supply_curve/competitive_wind_farms.py +90 -48
- reV/supply_curve/exclusions.py +138 -85
- reV/supply_curve/extent.py +75 -50
- reV/supply_curve/points.py +735 -390
- reV/supply_curve/sc_aggregation.py +357 -248
- reV/supply_curve/supply_curve.py +604 -347
- reV/supply_curve/tech_mapping.py +144 -82
- reV/utilities/__init__.py +274 -16
- reV/utilities/pytest_utils.py +8 -4
- reV/version.py +1 -1
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/LICENSE +0 -0
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/entry_points.txt +0 -0
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/top_level.txt +0 -0
reV/generation/base.py
CHANGED
```diff
@@ -2,46 +2,64 @@
 """
 reV base gen and econ module.
 """
-from abc import ABC, abstractmethod
 import copy
-from concurrent.futures import TimeoutError
+import json
 import logging
-import pandas as pd
-import numpy as np
 import os
-import psutil
-import json
 import sys
+from abc import ABC, abstractmethod
+from concurrent.futures import TimeoutError
 from warnings import warn
 
+import numpy as np
+import pandas as pd
+import psutil
+from rex.resource import Resource
+from rex.utilities.execution import SpawnProcessPool
+
 from reV.config.output_request import SAMOutputRequest
-from reV.config.project_points import ProjectPoints, PointsControl
+from reV.config.project_points import PointsControl, ProjectPoints
 from reV.handlers.outputs import Outputs
 from reV.SAM.version_checker import PySamVersionChecker
-from reV.utilities.exceptions import (OutputWarning, ExecutionError,
-                                      ParallelExecutionWarning,
-                                      OffshoreWindInputWarning)
-from reV.utilities import log_versions, ModuleName
-
-from rex.resource import Resource
-from rex.utilities.execution import SpawnProcessPool
+from reV.utilities import ModuleName, ResourceMetaField, log_versions
+from reV.utilities.exceptions import (
+    ExecutionError,
+    OffshoreWindInputWarning,
+    OutputWarning,
+    ParallelExecutionWarning,
+)
 
 logger = logging.getLogger(__name__)
 
 
 ATTR_DIR = os.path.dirname(os.path.realpath(__file__))
 ATTR_DIR = os.path.join(ATTR_DIR, 'output_attributes')
-with open(os.path.join(ATTR_DIR, 'other.json'), 'r') as f:
+with open(os.path.join(ATTR_DIR, 'other.json')) as f:
     OTHER_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR, 'lcoe_fcr.json'), 'r') as f:
+with open(os.path.join(ATTR_DIR, 'lcoe_fcr.json')) as f:
     LCOE_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR, 'single_owner.json'), 'r') as f:
+with open(os.path.join(ATTR_DIR, 'single_owner.json')) as f:
     SO_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR, 'windbos.json'), 'r') as f:
+with open(os.path.join(ATTR_DIR, 'windbos.json')) as f:
     BOS_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR, 'lcoe_fcr_inputs.json'), 'r') as f:
+with open(os.path.join(ATTR_DIR, 'lcoe_fcr_inputs.json')) as f:
     LCOE_IN_ATTRS = json.load(f)
 
+LCOE_REQUIRED_OUTPUTS = ("system_capacity", "capital_cost_multiplier",
+                         "capital_cost", "fixed_operating_cost",
+                         "variable_operating_cost", "base_capital_cost",
+                         "base_fixed_operating_cost",
+                         "base_variable_operating_cost", "fixed_charge_rate")
+"""Required econ outputs in generation file."""
+
+
+def _add_lcoe_outputs(output_request):
+    """Add required lcoe outputs to output request. """
+    for out_var in LCOE_REQUIRED_OUTPUTS:
+        if out_var not in output_request:
+            output_request.append(out_var)
+    return output_request
+
 
 class BaseGen(ABC):
     """Base class for reV gen and econ classes to run SAM simulations."""
```
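The headline addition in this hunk is the module-level `LCOE_REQUIRED_OUTPUTS` tuple and the `_add_lcoe_outputs` helper that backfills those names into a user's output request. A minimal standalone sketch of the helper's behavior (the tuple is abbreviated here; the real one lists nine outputs):

```python
# Abbreviated stand-in for the real nine-entry tuple.
LCOE_REQUIRED_OUTPUTS = ("system_capacity", "capital_cost",
                         "fixed_charge_rate")


def _add_lcoe_outputs(output_request):
    """Append any missing required LCOE outputs, in place."""
    for out_var in LCOE_REQUIRED_OUTPUTS:
        if out_var not in output_request:
            output_request.append(out_var)
    return output_request


print(_add_lcoe_outputs(["cf_mean", "lcoe_fcr", "capital_cost"]))
# ['cf_mean', 'lcoe_fcr', 'capital_cost', 'system_capacity',
#  'fixed_charge_rate']
```

Note that the helper mutates and returns the same list, so callers can use either the return value or the original reference.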
```diff
@@ -65,12 +83,19 @@ class BaseGen(ABC):
     # SAM argument names used to calculate LCOE
     # Note that system_capacity is not included here because it is never used
     # downstream and could be confused with the supply_curve point capacity
-    LCOE_ARGS = ('fixed_charge_rate', 'capital_cost', 'fixed_operating_cost',
+    LCOE_ARGS = ('fixed_charge_rate', 'capital_cost',
+                 'fixed_operating_cost',
                  'variable_operating_cost')
 
-    def __init__(self, points_control, output_request, site_data=None,
-                 drop_leap=False, memory_utilization_limit=0.4,
-                 scale_outputs=True):
+    def __init__(
+        self,
+        points_control,
+        output_request,
+        site_data=None,
+        drop_leap=False,
+        memory_utilization_limit=0.4,
+        scale_outputs=True,
+    ):
         """
         Parameters
         ----------
```
```diff
@@ -106,11 +131,13 @@ class BaseGen(ABC):
         self.mem_util_lim = memory_utilization_limit
         self.scale_outputs = scale_outputs
 
-        self._run_attrs = {'points_control': str(points_control),
-                           'output_request': output_request,
-                           'site_data': str(site_data),
-                           'drop_leap': str(drop_leap),
-                           'memory_utilization_limit': self.mem_util_lim}
+        self._run_attrs = {
+            "points_control": str(points_control),
+            "output_request": output_request,
+            "site_data": str(site_data),
+            "drop_leap": str(drop_leap),
+            "memory_utilization_limit": self.mem_util_lim,
+        }
 
         self._site_data = self._parse_site_data(site_data)
         self.add_site_data_to_pp(self._site_data)
```
|
|
174
201
|
tot_mem = psutil.virtual_memory().total / 1e6
|
175
202
|
avail_mem = self.mem_util_lim * tot_mem
|
176
203
|
self._site_limit = int(np.floor(avail_mem / self.site_mem))
|
177
|
-
logger.info(
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
204
|
+
logger.info(
|
205
|
+
"Limited to storing {0} sites in memory "
|
206
|
+
"({1:.1f} GB total hardware, {2:.1f} GB available "
|
207
|
+
"with {3:.1f}% utilization).".format(
|
208
|
+
self._site_limit,
|
209
|
+
tot_mem / 1e3,
|
210
|
+
avail_mem / 1e3,
|
211
|
+
self.mem_util_lim * 100,
|
212
|
+
)
|
213
|
+
)
|
182
214
|
|
183
215
|
return self._site_limit
|
184
216
|
|
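The `site_limit` arithmetic above caps how many sites are held in memory at once. A toy walk-through with made-up numbers standing in for `psutil.virtual_memory()`:

```python
import numpy as np

tot_mem = 64e9 / 1e6     # 64 GB of hardware, expressed in MB
mem_util_lim = 0.4       # default memory_utilization_limit
site_mem = 2.5           # hypothetical MB-per-site estimate

avail_mem = mem_util_lim * tot_mem
site_limit = int(np.floor(avail_mem / site_mem))
print(site_limit)        # 10240 sites can be held in memory at once
```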
```diff
@@ -199,17 +231,18 @@ class BaseGen(ABC):
         n = 100
         self._site_mem = 0
         for request in self.output_request:
-            dtype = 'float32'
+            dtype = "float32"
             if request in self.OUT_ATTRS:
-                dtype = self.OUT_ATTRS[request].get('dtype', 'float32')
+                dtype = self.OUT_ATTRS[request].get("dtype", "float32")
 
             shape = self._get_data_shape(request, n)
             self._site_mem += sys.getsizeof(np.ones(shape, dtype=dtype))
 
         self._site_mem = self._site_mem / 1e6 / n
-        logger.info('Output results from a single site are calculated to '
-                    'use {0:.1f} KB of memory.'
-                    .format(self._site_mem / 1000))
+        logger.info(
+            "Output results from a single site are calculated to "
+            "use {0:.1f} KB of memory.".format(self._site_mem / 1000)
+        )
 
         return self._site_mem
 
```
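`site_mem` sizes the outputs for a nominal 100 sites and averages down to MB per site. A sketch under the assumption that a single hourly (8760-step) float32 profile is requested:

```python
import sys

import numpy as np

n = 100                                                        # nominal sites
site_mem = sys.getsizeof(np.ones((8760, n), dtype="float32"))  # bytes
site_mem = site_mem / 1e6 / n                                  # MB per site
print(round(site_mem, 3))                                      # ~0.035 MB
```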
```diff
@@ -259,7 +292,7 @@ class BaseGen(ABC):
         """
         sam_metas = self.sam_configs.copy()
         for v in sam_metas.values():
-            v.update({'module': self._sam_module.MODULE})
+            v.update({"module": self._sam_module.MODULE})
 
         return sam_metas
 
```
```diff
@@ -284,7 +317,8 @@ class BaseGen(ABC):
             Meta data df for sites in project points. Column names are meta
             data variables, rows are different sites. The row index
             does not indicate the site number if the project points are
-            non-sequential or do not start from 0, so a 'gid' column is added.
+            non-sequential or do not start from 0, so a `SiteDataField.GID`
+            column is added.
         """
         return self._meta
 
```
```diff
@@ -351,12 +385,12 @@ class BaseGen(ABC):
         out = {}
         for k, v in self._out.items():
             if k in self.OUT_ATTRS:
-                scale_factor = self.OUT_ATTRS[k].get('scale_factor', 1)
+                scale_factor = self.OUT_ATTRS[k].get("scale_factor", 1)
             else:
                 scale_factor = 1
 
             if scale_factor != 1 and self.scale_outputs:
-                v = v.astype('float32')
+                v = v.astype("float32")
                 v /= scale_factor
 
             out[k] = v
```
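The unscaling branch divides stored integer data by the dataset's `scale_factor` after casting to float32. A small illustration:

```python
import numpy as np

scale_factor = 1000                       # e.g. data stored as int * 1000
v = np.array([850, 900], dtype=np.int16)

v = v.astype("float32")
v /= scale_factor
print(v)                                  # [0.85 0.9]
```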
```diff
@@ -381,16 +415,14 @@ class BaseGen(ABC):
         result = self.unpack_futures(result)
 
         if isinstance(result, dict):
-
             # iterate through dict where sites are keys and values are
             # corresponding results
             for site_gid, site_output in result.items():
-
                 # check that the sites are stored sequentially then add to
                 # the finished site list
                 if self._finished_sites:
                     if int(site_gid) < np.max(self._finished_sites):
-                        raise Exception('Site results are non sequential!')
+                        raise Exception("Site results are non sequential!")
 
                 # unpack site output object
                 self.unpack_output(site_gid, site_output)
```
```diff
@@ -402,9 +434,11 @@ class BaseGen(ABC):
             self._out.clear()
             self._finished_sites.clear()
         else:
-            raise TypeError('Did not recognize the type of output. '
-                            'Tried to set output type "{}", but requires '
-                            'list, dict or None.'.format(type(result)))
+            raise TypeError(
+                "Did not recognize the type of output. "
+                'Tried to set output type "{}", but requires '
+                "list, dict or None.".format(type(result))
+            )
 
     @staticmethod
     def _output_request_type_check(req):
```
```diff
@@ -428,8 +462,10 @@ class BaseGen(ABC):
         elif isinstance(req, str):
             output_request = [req]
         else:
-            raise TypeError('Output request must be str, list, or tuple but '
-                            'received: {}'.format(type(req)))
+            raise TypeError(
+                "Output request must be str, list, or tuple but "
+                "received: {}".format(type(req))
+            )
 
         return output_request
 
```
```diff
@@ -452,7 +488,7 @@ class BaseGen(ABC):
         """
 
         # Drop leap day or last day
-        leap_day = ((ti.month == 2) & (ti.day == 29))
+        leap_day = (ti.month == 2) & (ti.day == 29)
         leap_year = ti.year % 4 == 0
         last_day = ((ti.month == 12) & (ti.day == 31)) * leap_year
         if drop_leap:
```
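The `leap_day` mask is plain pandas; for example:

```python
import pandas as pd

ti = pd.date_range("2012-02-28", periods=3, freq="D")
leap_day = (ti.month == 2) & (ti.day == 29)
print(ti[~leap_day])  # drops 2012-02-29, keeps Feb 28 and Mar 1
```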
```diff
@@ -463,14 +499,23 @@ class BaseGen(ABC):
             ti = ti.drop(ti[last_day])
 
         if len(ti) % 365 != 0:
-            raise ValueError('Bad time index with length not a multiple of '
-                             '365: {}'.format(ti))
+            raise ValueError(
+                "Bad time index with length not a multiple of "
+                "365: {}".format(ti)
+            )
 
         return ti
 
     @staticmethod
-    def _pp_to_pc(points, points_range, sam_configs, tech,
-                  sites_per_worker=None, res_file=None, curtailment=None):
+    def _pp_to_pc(
+        points,
+        points_range,
+        sam_configs,
+        tech,
+        sites_per_worker=None,
+        res_file=None,
+        curtailment=None,
+    ):
         """
         Create ProjectControl from ProjectPoints
 
```
```diff
@@ -519,16 +564,25 @@ class BaseGen(ABC):
         if hasattr(points, "df"):
             points = points.df
 
-        pp = ProjectPoints(points, sam_configs, tech=tech,
-                           res_file=res_file, curtailment=curtailment)
+        pp = ProjectPoints(
+            points,
+            sam_configs,
+            tech=tech,
+            res_file=res_file,
+            curtailment=curtailment,
+        )
 
         # make Points Control instance
         if points_range is not None:
             # PointsControl is for just a subset of the project points...
             # this is the case if generation is being initialized on one
             # of many HPC nodes in a large project
-            pc = PointsControl.split(points_range[0], points_range[1], pp,
-                                     sites_per_split=sites_per_worker)
+            pc = PointsControl.split(
+                points_range[0],
+                points_range[1],
+                pp,
+                sites_per_split=sites_per_worker,
+            )
         else:
             # PointsControl is for all of the project points
             pc = PointsControl(pp, sites_per_split=sites_per_worker)
```
```diff
@@ -536,8 +590,16 @@ class BaseGen(ABC):
         return pc
 
     @classmethod
-    def get_pc(cls, points, points_range, sam_configs, tech,
-               sites_per_worker=None, res_file=None, curtailment=None):
+    def get_pc(
+        cls,
+        points,
+        points_range,
+        sam_configs,
+        tech,
+        sites_per_worker=None,
+        res_file=None,
+        curtailment=None,
+    ):
         """Get a PointsControl instance.
 
         Parameters
```
```diff
@@ -585,9 +647,12 @@ class BaseGen(ABC):
         """
 
         if tech not in cls.OPTIONS and tech.lower() != ModuleName.ECON:
-            msg = ('Did not recognize reV-SAM technology string "{}". '
-                   'Technology string options are: {}'
-                   .format(tech, list(cls.OPTIONS.keys())))
+            msg = (
+                'Did not recognize reV-SAM technology string "{}". '
+                "Technology string options are: {}".format(
+                    tech, list(cls.OPTIONS.keys())
+                )
+            )
             logger.error(msg)
             raise KeyError(msg)
 
```
```diff
@@ -595,16 +660,25 @@ class BaseGen(ABC):
         # get the optimal sites per split based on res file chunk size
         sites_per_worker = cls.get_sites_per_worker(res_file)
 
-        logger.debug('Sites per worker being set to {} for PointsControl.'
-                     .format(sites_per_worker))
+        logger.debug(
+            "Sites per worker being set to {} for " "PointsControl.".format(
+                sites_per_worker
+            )
+        )
 
         if isinstance(points, PointsControl):
             # received a pre-intialized instance of pointscontrol
             pc = points
         else:
-            pc = cls._pp_to_pc(points, points_range, sam_configs, tech,
-                               sites_per_worker=sites_per_worker,
-                               res_file=res_file, curtailment=curtailment)
+            pc = cls._pp_to_pc(
+                points,
+                points_range,
+                sam_configs,
+                tech,
+                sites_per_worker=sites_per_worker,
+                res_file=res_file,
+                curtailment=curtailment,
+            )
 
         return pc
 
```
```diff
@@ -637,31 +711,37 @@ class BaseGen(ABC):
             return default
 
         with Resource(res_file) as res:
-            if 'wtk' in res_file.lower():
+            if "wtk" in res_file.lower():
                 for dset in res.datasets:
-                    if 'speed' in dset:
+                    if "speed" in dset:
                         # take nominal WTK chunks from windspeed
                         _, _, chunks = res.get_dset_properties(dset)
                         break
-            elif 'nsrdb' in res_file.lower():
+            elif "nsrdb" in res_file.lower():
                 # take nominal NSRDB chunks from dni
-                _, _, chunks = res.get_dset_properties('dni')
+                _, _, chunks = res.get_dset_properties("dni")
             else:
-                warn('Could not infer dataset chunk size as the resource '
-                     'type could not be determined from the filename: {}'
-                     .format(res_file))
+                warn(
+                    "Could not infer dataset chunk size as the resource type "
+                    "could not be determined from the filename: {}".format(
+                        res_file
+                    )
+                )
                 chunks = None
 
         if chunks is None:
             # if chunks not set, go to default
             sites_per_worker = default
-            logger.debug('Sites per worker being set to {} (default) based '
-                         'on no set chunk size in {}.'
-                         .format(sites_per_worker, res_file))
+            logger.debug(
+                "Sites per worker being set to {} (default) based on "
+                "no set chunk size in {}.".format(sites_per_worker, res_file)
+            )
         else:
             sites_per_worker = chunks[1]
-            logger.debug('Sites per worker being set to {} based on chunk '
-                         'size of {}.'.format(sites_per_worker, res_file))
+            logger.debug(
+                "Sites per worker being set to {} based on chunk "
+                "size of {}.".format(sites_per_worker, res_file)
+            )
 
         return sites_per_worker
 
```
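`get_sites_per_worker` keys the worker size off the resource file's HDF5 chunk shape: `get_dset_properties` returns the dataset shape, dtype, and chunks, and the second chunk dimension is the site axis. A sketch with a hypothetical chunk shape:

```python
chunks = (8760, 500)  # hypothetical (time, site) HDF5 chunk shape
default = 100

sites_per_worker = chunks[1] if chunks is not None else default
print(sites_per_worker)  # 500
```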
```diff
@@ -688,8 +768,13 @@ class BaseGen(ABC):
 
     @staticmethod
     @abstractmethod
-    def _run_single_worker(points_control, tech=None, res_file=None,
-                           output_request=None, scale_outputs=True):
+    def _run_single_worker(
+        points_control,
+        tech=None,
+        res_file=None,
+        output_request=None,
+        scale_outputs=True,
+    ):
         """Run a reV-SAM analysis based on the points_control iterator.
 
         Parameters
```
```diff
@@ -735,31 +820,37 @@ class BaseGen(ABC):
         if inp is None or inp is False:
             # no input, just initialize dataframe with site gids as index
             site_data = pd.DataFrame(index=self.project_points.sites)
-            site_data.index.name = 'gid'
+            site_data.index.name = ResourceMetaField.GID
         else:
             # explicit input, initialize df
             if isinstance(inp, str):
-                if inp.endswith('.csv'):
+                if inp.endswith(".csv"):
                     site_data = pd.read_csv(inp)
             elif isinstance(inp, pd.DataFrame):
                 site_data = inp
             else:
                 # site data was not able to be set. Raise error.
-                raise Exception('Site data input must be .csv or '
-                                'dataframe, but received: {}'
-                                .format(inp))
-        if 'gid' not in site_data and site_data.index.name != 'gid':
+                raise Exception(
+                    "Site data input must be .csv or "
+                    "dataframe, but received: {}".format(inp)
+                )
+
+        gid_not_in_site_data = ResourceMetaField.GID not in site_data
+        index_name_not_gid = site_data.index.name != ResourceMetaField.GID
+        if gid_not_in_site_data and index_name_not_gid:
             # require gid as column label or index
-            raise KeyError('Site data input must have "gid" column '
-                           'to match reV site gid.')
+            raise KeyError('Site data input must have '
+                           f'{ResourceMetaField.GID} column to match '
+                           'reV site gid.')
 
         # pylint: disable=no-member
-        if site_data.index.name != 'gid':
+        if site_data.index.name != ResourceMetaField.GID:
             # make gid the dataframe index if not already
-            site_data = site_data.set_index('gid', drop=True)
+            site_data = site_data.set_index(ResourceMetaField.GID,
+                                            drop=True)
 
-        if 'offshore' in site_data:
-            if site_data['offshore'].sum() > 1:
+        if "offshore" in site_data:
+            if site_data["offshore"].sum() > 1:
                 w = ('Found offshore sites in econ site data input. '
                      'This functionality has been deprecated. '
                      'Please run the reV offshore module to '
```
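`_parse_site_data` now requires the gid key as a column or as the index name, via `ResourceMetaField.GID`. A sketch of a conforming input, assuming that field resolves to the string "gid":

```python
import pandas as pd

site_data = pd.DataFrame({"gid": [10, 11, 12],
                          "capital_cost": [39.7e6, 39.7e6, 41.0e6]})
site_data = site_data.set_index("gid", drop=True)
print(site_data.index.name)  # 'gid', so the KeyError branch is not hit
```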
```diff
@@ -783,7 +874,6 @@ class BaseGen(ABC):
         """
         self.project_points.join_df(site_data, key=self.site_data.index.name)
 
-    @abstractmethod
     def _parse_output_request(self, req):
         """Set the output variables requested from the user.
 
```
```diff
@@ -797,6 +887,12 @@ class BaseGen(ABC):
         output_request : list
             Output variables requested from SAM.
         """
+        output_request = self._output_request_type_check(req)
+
+        if "lcoe_fcr" in output_request:
+            output_request = _add_lcoe_outputs(output_request)
+
+        return output_request
 
     def _get_data_shape(self, dset, n_sites):
         """Get the output array shape based on OUT_ATTRS or PySAM.Outputs.
```
```diff
@@ -824,23 +920,25 @@ class BaseGen(ABC):
 
     def _get_data_shape_from_out_attrs(self, dset, n_sites):
         """Get data shape from ``OUT_ATTRS`` variable"""
-        if self.OUT_ATTRS[dset]['type'] == 'array':
+        if self.OUT_ATTRS[dset]["type"] == "array":
             return (len(self.time_index), n_sites)
         return (n_sites,)
 
     def _get_data_shape_from_sam_config(self, dset, n_sites):
-        """Get data shape from SAM input config """
+        """Get data shape from SAM input config"""
         data = list(self.project_points.sam_inputs.values())[0][dset]
         if isinstance(data, (list, tuple, np.ndarray)):
             return (*np.array(data).shape, n_sites)
 
         if isinstance(data, str):
-            msg = ('Cannot pass through non-scalar SAM input key "{}" '
-                   'as an output_request!'.format(dset))
+            msg = (
+                'Cannot pass through non-scalar SAM input key "{}" '
+                "as an output_request!".format(dset)
+            )
             logger.error(msg)
             raise ExecutionError(msg)
 
-        return (n_sites, )
+        return (n_sites,)
 
     def _get_data_shape_from_pysam(self, dset, n_sites):
         """Get data shape from PySAM output object"""
```
```diff
@@ -850,10 +948,13 @@ class BaseGen(ABC):
         try:
             out_data = getattr(self._sam_obj_default.Outputs, dset)
         except AttributeError as e:
-            msg = ('Could not get data shape for dset "{}" '
-                   'from object "{}". '
-                   'Received the following error: "{}"'
-                   .format(dset, self._sam_obj_default, e))
+            msg = (
+                'Could not get data shape for dset "{}" '
+                'from object "{}". '
+                'Received the following error: "{}"'.format(
+                    dset, self._sam_obj_default, e
+                )
+            )
             logger.error(msg)
             raise ExecutionError(msg) from e
 
```
```diff
@@ -873,26 +974,27 @@ class BaseGen(ABC):
         project_dir, out_fn = os.path.split(out_fpath)
 
         # ensure output file is an h5
-        if not out_fn.endswith('.h5'):
-            out_fn += '.h5'
+        if not out_fn.endswith(".h5"):
+            out_fn += ".h5"
 
         if module not in out_fn:
             extension_with_module = "_{}.h5".format(module)
             out_fn = out_fn.replace(".h5", extension_with_module)
 
         # ensure year is in out_fpath
-        if self.year is not None and str(self.year) not in out_fn:
+        if self.year is not None:
             extension_with_year = "_{}.h5".format(self.year)
-            out_fn = out_fn.replace(".h5", extension_with_year)
+            if extension_with_year not in out_fn:
+                out_fn = out_fn.replace(".h5", extension_with_year)
 
         # create and use optional output dir
         if project_dir and not os.path.exists(project_dir):
             os.makedirs(project_dir, exist_ok=True)
 
         self._out_fpath = os.path.join(project_dir, out_fn)
-        self._run_attrs['out_fpath'] = out_fpath
+        self._run_attrs["out_fpath"] = out_fpath
 
-    def _init_h5(self, mode='w'):
+    def _init_h5(self, mode="w"):
         """Initialize the single h5 output file with all output requests.
 
         Parameters
```
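`_init_fpath` normalizes the output filename by appending the module and year suffixes when they are missing. A standalone sketch with hypothetical values `module="gen"` and `year=2012`:

```python
out_fn, module, year = "project", "gen", 2012

if not out_fn.endswith(".h5"):
    out_fn += ".h5"
if module not in out_fn:
    out_fn = out_fn.replace(".h5", "_{}.h5".format(module))
if year is not None:
    extension_with_year = "_{}.h5".format(year)
    if extension_with_year not in out_fn:
        out_fn = out_fn.replace(".h5", extension_with_year)

print(out_fn)  # project_gen_2012.h5
```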
```diff
@@ -904,12 +1006,18 @@ class BaseGen(ABC):
         if self._out_fpath is None:
             return
 
-        if 'w' in mode:
-            logger.info('Initializing full output file: "{}" with mode: {}'
-                        .format(self._out_fpath, mode))
-        elif 'a' in mode:
-            logger.info('Appending data to output file: "{}" with mode: {}'
-                        .format(self._out_fpath, mode))
+        if "w" in mode:
+            logger.info(
+                'Initializing full output file: "{}" with mode: {}'.format(
+                    self._out_fpath, mode
+                )
+            )
+        elif "a" in mode:
+            logger.info(
+                'Appending data to output file: "{}" with mode: {}'.format(
+                    self._out_fpath, mode
+                )
+            )
 
         attrs = {d: {} for d in self.output_request}
         chunks = {}
```
```diff
@@ -920,17 +1028,16 @@ class BaseGen(ABC):
         write_ti = False
 
         for dset in self.output_request:
-
-            tmp = 'other'
+            tmp = "other"
             if dset in self.OUT_ATTRS:
                 tmp = dset
 
-            attrs[dset]['units'] = self.OUT_ATTRS[tmp].get('units',
-                                                           'unknown')
-            attrs[dset]['scale_factor'] = \
-                self.OUT_ATTRS[tmp].get('scale_factor', 1)
-            chunks[dset] = self.OUT_ATTRS[tmp].get('chunks', None)
-            dtypes[dset] = self.OUT_ATTRS[tmp].get('dtype', 'float32')
+            attrs[dset]["units"] = self.OUT_ATTRS[tmp].get("units", "unknown")
+            attrs[dset]["scale_factor"] = self.OUT_ATTRS[tmp].get(
+                "scale_factor", 1
+            )
+            chunks[dset] = self.OUT_ATTRS[tmp].get("chunks", None)
+            dtypes[dset] = self.OUT_ATTRS[tmp].get("dtype", "float32")
             shapes[dset] = self._get_data_shape(dset, len(self.meta))
             if len(shapes[dset]) > 1:
                 write_ti = True
```
```diff
@@ -941,10 +1048,19 @@ class BaseGen(ABC):
         else:
             ti = None
 
-        Outputs.init_h5(self._out_fpath, self.output_request, shapes,
-                        attrs, chunks, dtypes, self.meta, time_index=ti,
-                        configs=self.sam_metas, run_attrs=self.run_attrs,
-                        mode=mode)
+        Outputs.init_h5(
+            self._out_fpath,
+            self.output_request,
+            shapes,
+            attrs,
+            chunks,
+            dtypes,
+            self.meta,
+            time_index=ti,
+            configs=self.sam_metas,
+            run_attrs=self.run_attrs,
+            mode=mode,
+        )
 
     def _init_out_arrays(self, index_0=0):
         """Initialize output arrays based on the number of sites that can be
```
```diff
@@ -962,21 +1078,27 @@ class BaseGen(ABC):
         self._finished_sites = []
 
         # Output chunk is the index range (inclusive) of this set of site outs
-        self._out_chunk = (index_0, np.min((index_0 + self.site_limit,
-                                            len(self.project_points) - 1)))
+        self._out_chunk = (
+            index_0,
+            np.min((index_0 + self.site_limit, len(self.project_points) - 1)),
+        )
         self._out_n_sites = int(self.out_chunk[1] - self.out_chunk[0]) + 1
 
-        logger.info('Initializing in-memory outputs for {} sites with gids '
-                    '{} through {} inclusive (site list index {} through {})'
-                    .format(self._out_n_sites,
-                            self.project_points.sites[self.out_chunk[0]],
-                            self.project_points.sites[self.out_chunk[1]],
-                            self.out_chunk[0], self.out_chunk[1]))
+        logger.info(
+            "Initializing in-memory outputs for {} sites with gids "
+            "{} through {} inclusive (site list index {} through {})".format(
+                self._out_n_sites,
+                self.project_points.sites[self.out_chunk[0]],
+                self.project_points.sites[self.out_chunk[1]],
+                self.out_chunk[0],
+                self.out_chunk[1],
+            )
+        )
 
         for request in self.output_request:
-            dtype = 'float32'
+            dtype = "float32"
             if request in self.OUT_ATTRS and self.scale_outputs:
-                dtype = self.OUT_ATTRS[request].get('dtype', 'float32')
+                dtype = self.OUT_ATTRS[request].get("dtype", "float32")
 
             shape = self._get_data_shape(request, self._out_n_sites)
 
```
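The output chunk bounds are inclusive, hence the `+ 1` when counting sites. With toy values:

```python
import numpy as np

index_0, site_limit, n_points = 0, 100, 250

out_chunk = (index_0, np.min((index_0 + site_limit, n_points - 1)))
out_n_sites = int(out_chunk[1] - out_chunk[0]) + 1
print(out_chunk, out_n_sites)  # (0, 100) 101
```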
```diff
@@ -1004,9 +1126,11 @@ class BaseGen(ABC):
         # iterate through the site results
         for var, value in site_output.items():
             if var not in self._out:
-                raise KeyError('Tried to collect output variable "{}", but '
-                               'it was not yet initialized in the output '
-                               'dictionary.')
+                raise KeyError(
+                    'Tried to collect output variable "{}", but it '
+                    "was not yet initialized in the output "
+                    "dictionary."
+                )
 
             # get the index in the output array for the current site
             i = self.site_index(site_gid, out_index=True)
```
```diff
@@ -1055,12 +1179,14 @@ class BaseGen(ABC):
         else:
             output_index = global_site_index - self.out_chunk[0]
             if output_index < 0:
-                raise ValueError('Attempting to set output data for site '
-                                 'with gid {} to global site index {}, '
-                                 'which was already set based on the '
-                                 'current output index chunk of {}'
-                                 .format(site_gid, global_site_index,
-                                         self.out_chunk))
+                raise ValueError(
+                    "Attempting to set output data for site with "
+                    "gid {} to global site index {}, which was "
+                    "already set based on the current output "
+                    "index chunk of {}".format(
+                        site_gid, global_site_index, self.out_chunk
+                    )
+                )
 
         return output_index
 
```
```diff
@@ -1075,15 +1201,17 @@ class BaseGen(ABC):
 
         # handle output file request if file is specified and .out is not empty
         if isinstance(self._out_fpath, str) and self._out:
-            logger.info('Flushing outputs to disk, target file: "{}"'
-                        .format(self._out_fpath))
+            logger.info(
+                'Flushing outputs to disk, target file: "{}"'.format(
+                    self._out_fpath
+                )
+            )
 
             # get the slice of indices to write outputs to
             islice = slice(self.out_chunk[0], self.out_chunk[1] + 1)
 
             # open output file in append mode to add output results to
-            with Outputs(self._out_fpath, mode='a') as f:
-
+            with Outputs(self._out_fpath, mode="a") as f:
                 # iterate through all output requests writing each as a dataset
                 for dset, arr in self._out.items():
                     if len(arr.shape) == 1:
```
```diff
@@ -1093,7 +1221,7 @@ class BaseGen(ABC):
                     # write 2D array of profiles
                     f[dset, :, islice] = arr
 
-            logger.debug('Flushed output successfully to disk.')
+            logger.debug("Flushed output successfully to disk.")
 
     def _pre_split_pc(self, pool_size=None):
         """Pre-split project control iterator into sub chunks to further
```
```diff
@@ -1129,11 +1257,15 @@ class BaseGen(ABC):
             if i_chunk:
                 pc_chunks.append(i_chunk)
 
-        logger.debug('Pre-splitting points control into {} chunks with the '
-                     'following chunk sizes: {}'
-                     .format(len(pc_chunks), [len(x) for x in pc_chunks]))
+        logger.debug(
+            "Pre-splitting points control into {} chunks with the "
+            "following chunk sizes: {}".format(
+                len(pc_chunks), [len(x) for x in pc_chunks]
+            )
+        )
         return N, pc_chunks
 
+    # pylint: disable=unused-argument
     def _reduce_kwargs(self, pc, **kwargs):
         """Placeholder for functions that need to reduce the global kwargs that
         they send to workers to reduce memory footprint
```
```diff
@@ -1153,8 +1285,9 @@ class BaseGen(ABC):
         """
         return kwargs
 
-    def _parallel_run(self, max_workers=None, pool_size=None, timeout=1800,
-                      **kwargs):
+    def _parallel_run(
+        self, max_workers=None, pool_size=None, timeout=1800, **kwargs
+    ):
         """Execute parallel compute.
 
         Parameters
```
```diff
@@ -1176,25 +1309,31 @@ class BaseGen(ABC):
             pool_size = os.cpu_count() * 2
         if max_workers is None:
             max_workers = os.cpu_count()
-        logger.info('Running parallel execution with max_workers={}'
-                    .format(max_workers))
+        logger.info(
+            "Running parallel execution with max_workers={}".format(
+                max_workers
+            )
+        )
         i = 0
         N, pc_chunks = self._pre_split_pc(pool_size=pool_size)
         for j, pc_chunk in enumerate(pc_chunks):
-            logger.debug('Starting process pool for points control '
-                         'iteration {} out of {}'
-                         .format(j + 1, len(pc_chunks)))
+            logger.debug(
+                "Starting process pool for points control "
+                "iteration {} out of {}".format(j + 1, len(pc_chunks))
+            )
 
             failed_futures = False
             chunks = {}
             futures = []
-            loggers = [__name__, 'reV.gen', 'reV.econ', 'reV']
-            with SpawnProcessPool(max_workers=max_workers,
-                                  loggers=loggers) as exe:
+            loggers = [__name__, "reV.gen", "reV.econ", "reV"]
+            with SpawnProcessPool(
+                max_workers=max_workers, loggers=loggers
+            ) as exe:
                 for pc in pc_chunk:
                     pc_kwargs = self._reduce_kwargs(pc, **kwargs)
-                    future = exe.submit(self._run_single_worker, pc,
-                                        **pc_kwargs)
+                    future = exe.submit(
+                        self._run_single_worker, pc, **pc_kwargs
+                    )
                     futures.append(future)
                     chunks[future] = pc
 
```
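The submit/collect pattern above uses rex's `SpawnProcessPool`. A minimal usage sketch; the worker must be a picklable module-level function because the pool spawns fresh processes:

```python
from rex.utilities.execution import SpawnProcessPool


def _square(x):
    return x * x


if __name__ == "__main__":
    with SpawnProcessPool(max_workers=2, loggers=[__name__]) as exe:
        futures = [exe.submit(_square, n) for n in range(4)]
        print([f.result(timeout=30) for f in futures])  # [0, 1, 4, 9]
```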
```diff
@@ -1205,24 +1344,32 @@ class BaseGen(ABC):
                     except TimeoutError:
                         failed_futures = True
                         sites = chunks[future].project_points.sites
-                        result = self._handle_failed_future(future, i, sites,
-                                                            timeout)
+                        result = self._handle_failed_future(
+                            future, i, sites, timeout
+                        )
 
                     self.out = result
 
                     mem = psutil.virtual_memory()
-                    m = ('Parallel run at iteration {0} out of {1}. '
-                         'Memory utilization is {2:.3f} GB out of {3:.3f} GB '
-                         'total ({4:.1f}% used, intended limit of {5:.1f}%)'
-                         .format(i, N, mem.used / 1e9, mem.total / 1e9,
-                                 100 * mem.used / mem.total,
-                                 100 * self.mem_util_lim))
+                    m = (
+                        "Parallel run at iteration {0} out of {1}. "
+                        "Memory utilization is {2:.3f} GB out of {3:.3f} GB "
+                        "total ({4:.1f}% used, intended limit of {5:.1f}%)"
+                        .format(
+                            i,
+                            N,
+                            mem.used / 1e9,
+                            mem.total / 1e9,
+                            100 * mem.used / mem.total,
+                            100 * self.mem_util_lim,
+                        )
+                    )
                     logger.info(m)
 
             if failed_futures:
-                logger.info('Forcing pool shutdown after failed futures.')
+                logger.info("Forcing pool shutdown after failed futures.")
                 exe.shutdown(wait=False)
-                logger.info('Forced pool shutdown complete.')
+                logger.info("Forced pool shutdown complete.")
 
         self.flush()
 
```
```diff
@@ -1242,23 +1389,23 @@ class BaseGen(ABC):
             before returning zeros.
         """
 
-        w = ('Iteration {} hit the timeout limit of {} seconds! Passing zeros.'
-             .format(i, timeout))
+        w = ("Iteration {} hit the timeout limit of {} seconds! "
+             "Passing zeros.".format(i, timeout))
         logger.warning(w)
         warn(w, OutputWarning)
 
-        site_out = {k: 0 for k in self.output_request}
-        result = {site: site_out for site in sites}
+        site_out = dict.fromkeys(self.output_request, 0)
+        result = dict.fromkeys(sites, site_out)
 
         try:
             cancelled = future.cancel()
         except Exception as e:
-            w = 'Could not cancel future! Received exception: {}'.format(e)
+            w = "Could not cancel future! Received exception: {}".format(e)
             logger.warning(w)
             warn(w, ParallelExecutionWarning)
 
         if not cancelled:
-            w = 'Could not cancel future!'
+            w = "Could not cancel future!"
             logger.warning(w)
             warn(w, ParallelExecutionWarning)
 
```
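The `dict.fromkeys` rewrite builds the same zero-filled placeholders as the old dict comprehensions, with one subtlety: all sites now share a single `site_out` dict, which is harmless here because the values are constant zeros and are only read. A toy illustration:

```python
output_request = ["cf_mean", "lcoe_fcr"]
sites = [0, 1]

site_out = dict.fromkeys(output_request, 0)
result = dict.fromkeys(sites, site_out)
print(result)                  # {0: {'cf_mean': 0, 'lcoe_fcr': 0}, 1: {...}}
print(result[0] is result[1])  # True: one shared dict, fine for read-only 0s
```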