NREL-reV 0.8.7__py3-none-any.whl → 0.8.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/METADATA +12 -10
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/RECORD +38 -38
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/WHEEL +1 -1
- reV/SAM/SAM.py +182 -133
- reV/SAM/econ.py +18 -14
- reV/SAM/generation.py +608 -419
- reV/SAM/windbos.py +93 -79
- reV/bespoke/bespoke.py +690 -445
- reV/bespoke/place_turbines.py +6 -6
- reV/config/project_points.py +220 -140
- reV/econ/econ.py +165 -113
- reV/econ/economies_of_scale.py +57 -34
- reV/generation/base.py +310 -183
- reV/generation/generation.py +298 -190
- reV/handlers/exclusions.py +16 -15
- reV/handlers/multi_year.py +12 -9
- reV/handlers/outputs.py +6 -5
- reV/hybrids/hybrid_methods.py +28 -30
- reV/hybrids/hybrids.py +304 -188
- reV/nrwal/nrwal.py +262 -168
- reV/qa_qc/cli_qa_qc.py +14 -10
- reV/qa_qc/qa_qc.py +217 -119
- reV/qa_qc/summary.py +228 -146
- reV/rep_profiles/rep_profiles.py +349 -230
- reV/supply_curve/aggregation.py +349 -188
- reV/supply_curve/competitive_wind_farms.py +90 -48
- reV/supply_curve/exclusions.py +138 -85
- reV/supply_curve/extent.py +75 -50
- reV/supply_curve/points.py +536 -309
- reV/supply_curve/sc_aggregation.py +366 -225
- reV/supply_curve/supply_curve.py +505 -308
- reV/supply_curve/tech_mapping.py +144 -82
- reV/utilities/__init__.py +199 -16
- reV/utilities/pytest_utils.py +8 -4
- reV/version.py +1 -1
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/LICENSE +0 -0
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/entry_points.txt +0 -0
- {NREL_reV-0.8.7.dist-info → NREL_reV-0.8.9.dist-info}/top_level.txt +0 -0
reV/generation/generation.py
CHANGED
@@ -2,6 +2,7 @@
 """
 reV generation module.
 """
+
 import copy
 import json
 import logging
@@ -10,42 +11,46 @@ import pprint
 
 import numpy as np
 import pandas as pd
-
-from reV.generation.base import BaseGen
-from reV.SAM.generation import (Geothermal,
-                                MhkWave,
-                                LinearDirectSteam,
-                                PvSamv1,
-                                PvWattsv5,
-                                PvWattsv7,
-                                PvWattsv8,
-                                SolarWaterHeat,
-                                TcsMoltenSalt,
-                                TroughPhysicalHeat,
-                                WindPower)
-from reV.utilities import ModuleName
-from reV.utilities.exceptions import (ConfigError,
-                                      InputError,
-                                      ProjectPointsValueError)
 from rex.multi_file_resource import MultiFileResource
 from rex.multi_res_resource import MultiResolutionResource
 from rex.resource import Resource
 from rex.utilities.utilities import check_res_file
 
+from reV.generation.base import BaseGen
+from reV.SAM.generation import (
+    Geothermal,
+    LinearDirectSteam,
+    MhkWave,
+    PvSamv1,
+    PvWattsv5,
+    PvWattsv7,
+    PvWattsv8,
+    SolarWaterHeat,
+    TcsMoltenSalt,
+    TroughPhysicalHeat,
+    WindPower,
+)
+from reV.utilities import ModuleName, ResourceMetaField, SupplyCurveField
+from reV.utilities.exceptions import (
+    ConfigError,
+    InputError,
+    ProjectPointsValueError,
+)
+
 logger = logging.getLogger(__name__)
 
 
 ATTR_DIR = os.path.dirname(os.path.realpath(__file__))
-ATTR_DIR = os.path.join(ATTR_DIR,
-with open(os.path.join(ATTR_DIR,
+ATTR_DIR = os.path.join(ATTR_DIR, "output_attributes")
+with open(os.path.join(ATTR_DIR, "other.json")) as f:
     OTHER_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR,
+with open(os.path.join(ATTR_DIR, "generation.json")) as f:
     GEN_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR,
+with open(os.path.join(ATTR_DIR, "linear_fresnel.json")) as f:
     LIN_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR,
+with open(os.path.join(ATTR_DIR, "solar_water_heat.json")) as f:
     SWH_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR,
+with open(os.path.join(ATTR_DIR, "trough_heat.json")) as f:
     TPPH_ATTRS = json.load(f)
 
 
@@ -53,17 +58,19 @@ class Gen(BaseGen):
     """Gen"""
 
     # Mapping of reV technology strings to SAM generation objects
-    OPTIONS = {
-
-
-
-
-
-
-
-
-
-
+    OPTIONS = {
+        "geothermal": Geothermal,
+        "lineardirectsteam": LinearDirectSteam,
+        "mhkwave": MhkWave,
+        "pvsamv1": PvSamv1,
+        "pvwattsv5": PvWattsv5,
+        "pvwattsv7": PvWattsv7,
+        "pvwattsv8": PvWattsv8,
+        "solarwaterheat": SolarWaterHeat,
+        "tcsmoltensalt": TcsMoltenSalt,
+        "troughphysicalheat": TroughPhysicalHeat,
+        "windpower": WindPower,
+    }
 
     """reV technology options."""
 
@@ -76,13 +83,25 @@ class Gen(BaseGen):
     OUT_ATTRS.update(TPPH_ATTRS)
     OUT_ATTRS.update(BaseGen.ECON_ATTRS)
 
-    def __init__(
-
-
-
-
-
-
+    def __init__(
+        self,
+        technology,
+        project_points,
+        sam_files,
+        resource_file,
+        low_res_resource_file=None,
+        output_request=("cf_mean",),
+        site_data=None,
+        curtailment=None,
+        gid_map=None,
+        drop_leap=False,
+        sites_per_worker=None,
+        memory_utilization_limit=0.4,
+        scale_outputs=True,
+        write_mapped_gids=False,
+        bias_correct=None,
+    ):
+        """ReV generation analysis class.
 
         ``reV`` generation analysis runs SAM simulations by piping in
         renewable energy resource data (usually from the NSRDB or WTK),
@@ -121,14 +140,14 @@ class Gen(BaseGen):
         {'cf_mean': array([0.16966143], dtype=float32)}
         >>>
         >>> sites = [3, 4, 7, 9]
-        >>> req = ('cf_mean', '
+        >>> req = ('cf_mean', 'lcoe_fcr')
         >>> gen = Gen(sam_tech, sites, fp_sam, fp_res, output_request=req)
         >>> gen.run()
         >>>
         >>> gen.out
         {'lcoe_fcr': array([131.39166, 131.31221, 127.54539, 125.49656]),
-
-
+         'cf_mean': array([0.17713654, 0.17724372, 0.1824783 , 0.1854574 ]),
+         : array([[0., 0., 0., 0.],
                  [0., 0., 0., 0.],
                  [0., 0., 0., 0.],
                  ...,
@@ -361,22 +380,33 @@ class Gen(BaseGen):
             ``bias_correct`` table on a site-by-site basis. If ``None``, no
             corrections are applied. By default, ``None``.
         """
-        pc = self.get_pc(
-
-
-
-
-
-
-
-
+        pc = self.get_pc(
+            points=project_points,
+            points_range=None,
+            sam_configs=sam_files,
+            tech=technology,
+            sites_per_worker=sites_per_worker,
+            res_file=resource_file,
+            curtailment=curtailment,
+        )
+
+        super().__init__(
+            pc,
+            output_request,
+            site_data=site_data,
+            drop_leap=drop_leap,
+            memory_utilization_limit=memory_utilization_limit,
+            scale_outputs=scale_outputs,
+        )
 
         if self.tech not in self.OPTIONS:
-            msg = (
-
-
-
+            msg = (
+                'Requested technology "{}" is not available. '
+                "reV generation can analyze the following "
+                "SAM technologies: {}".format(
+                    self.tech, list(self.OPTIONS.keys())
+                )
+            )
             logger.error(msg)
             raise KeyError(msg)
 
@@ -384,8 +414,8 @@ class Gen(BaseGen):
         self._res_file = resource_file
         self._lr_res_file = low_res_resource_file
         self._sam_module = self.OPTIONS[self.tech]
-        self._run_attrs[
-        self._run_attrs[
+        self._run_attrs["sam_module"] = self._sam_module.MODULE
+        self._run_attrs["res_file"] = resource_file
 
         self._multi_h5_res, self._hsds = check_res_file(resource_file)
         self._gid_map = self._parse_gid_map(gid_map)
@@ -424,11 +454,12 @@ class Gen(BaseGen):
             Meta data df for sites in project points. Column names are meta
             data variables, rows are different sites. The row index
             does not indicate the site number if the project points are
-            non-sequential or do not start from 0, so a
+            non-sequential or do not start from 0, so a `SupplyCurveField.GID`
+            column is added.
         """
         if self._meta is None:
             res_cls = Resource
-            kwargs = {
+            kwargs = {"hsds": self._hsds}
             if self._multi_h5_res:
                 res_cls = MultiFileResource
                 kwargs = {}
@@ -438,25 +469,28 @@ class Gen(BaseGen):
                 res_gids = [self._gid_map[i] for i in res_gids]
 
             with res_cls(self.res_file, **kwargs) as res:
-                meta_len = res.shapes[
+                meta_len = res.shapes["meta"][0]
 
                 if np.max(res_gids) > meta_len:
-                    msg = (
-
-
-
-
+                    msg = (
+                        "ProjectPoints has a max site gid of {} which is "
+                        "out of bounds for the meta data of len {} from "
+                        "resource file: {}".format(
+                            np.max(res_gids), meta_len, self.res_file
+                        )
+                    )
                     logger.error(msg)
                     raise ProjectPointsValueError(msg)
 
-                self._meta = res[
+                self._meta = res["meta", res_gids]
 
-                self._meta.loc[:,
+                self._meta.loc[:, ResourceMetaField.GID] = res_gids
                 if self.write_mapped_gids:
-
+                    sites = self.project_points.sites
+                    self._meta.loc[:, ResourceMetaField.GID] = sites
                 self._meta.index = self.project_points.sites
-                self._meta.index.name =
-                self._meta.loc[:,
+                self._meta.index.name = ResourceMetaField.GID
+                self._meta.loc[:, "reV_tech"] = self.project_points.tech
 
         return self._meta
 
@@ -472,7 +506,7 @@ class Gen(BaseGen):
         if self._time_index is None:
             if not self._multi_h5_res:
                 res_cls = Resource
-                kwargs = {
+                kwargs = {"hsds": self._hsds}
             else:
                 res_cls = MultiFileResource
                 kwargs = {}
@@ -484,19 +518,22 @@ class Gen(BaseGen):
             step = self.project_points.sam_config_obj.time_index_step
             if downscale is not None:
                 from rex.utilities.downscale import make_time_index
+
                 year = time_index.year[0]
-                ds_freq = downscale[
+                ds_freq = downscale["frequency"]
                 time_index = make_time_index(year, ds_freq)
-                logger.info(
-
-
-
+                logger.info(
+                    "reV solar generation running with temporal "
+                    'downscaling frequency "{}" with final '
+                    "time_index length {}".format(ds_freq, len(time_index))
+                )
             elif step is not None:
                 time_index = time_index[::step]
 
             time_index = self.handle_lifetime_index(time_index)
-            time_index = self.handle_leap_ti(
-
+            time_index = self.handle_leap_ti(
+                time_index, drop_leap=self._drop_leap
+            )
 
             self._time_index = time_index
 
@@ -530,30 +567,32 @@ class Gen(BaseGen):
         # Only one time index may be passed, check that lifetime periods match
         n_unique_periods = len(np.unique(lifetime_periods))
         if n_unique_periods != 1:
-            msg = (
-
-
-
+            msg = (
+                "reV cannot handle multiple analysis_periods when "
+                "modeling with `system_use_lifetime_output` set "
+                "to 1. Found {} different analysis_periods in the SAM "
+                "configs".format(n_unique_periods)
+            )
             logger.error(msg)
             raise ConfigError(msg)
 
         # Collect requested variables to check for lifetime compatibility
         array_vars = [
-            var for var, attrs in GEN_ATTRS.items()
-            if attrs['type'] == 'array'
+            var for var, attrs in GEN_ATTRS.items() if attrs["type"] == "array"
         ]
-        valid_vars = [
+        valid_vars = ["gen_profile", "cf_profile", "cf_profile_ac"]
         invalid_vars = set(array_vars) - set(valid_vars)
-        invalid_requests = [
-
+        invalid_requests = [
+            var for var in self.output_request if var in invalid_vars
+        ]
 
         if invalid_requests:
             # SAM does not output full lifetime for all array variables
             msg = (
-
-
-
-
+                "reV can only handle the following output arrays "
+                "when modeling with `system_use_lifetime_output` set "
+                "to 1: {}. Try running without {}.".format(
+                    ", ".join(valid_vars), ", ".join(invalid_requests)
                 )
             )
             logger.error(msg)
@@ -561,8 +600,10 @@ class Gen(BaseGen):
 
         sam_meta = self.sam_metas[next(iter(self.sam_metas))]
         analysis_period = sam_meta["analysis_period"]
-        logger.info(
-
+        logger.info(
+            "reV generation running with a full system "
+            "life of {} years.".format(analysis_period)
+        )
 
         old_end = ti[-1]
         new_end = old_end + pd.DateOffset(years=analysis_period - 1)
@@ -573,10 +614,18 @@ class Gen(BaseGen):
         return ti
 
     @classmethod
-    def _run_single_worker(
-
-
-
+    def _run_single_worker(
+        cls,
+        points_control,
+        tech=None,
+        res_file=None,
+        lr_res_file=None,
+        output_request=None,
+        scale_outputs=True,
+        gid_map=None,
+        nn_map=None,
+        bias_correct=None,
+    ):
         """Run a SAM generation analysis based on the points_control iterator.
 
         Parameters
@@ -637,21 +686,24 @@ class Gen(BaseGen):
 
         # Extract the site df from the project points df.
         site_df = points_control.project_points.df
-        site_df = site_df.set_index(
+        site_df = site_df.set_index(ResourceMetaField.GID, drop=True)
 
         # run generation method for specified technology
         try:
             out = cls.OPTIONS[tech].reV_run(
-                points_control,
+                points_control,
+                res_file,
+                site_df,
                 lr_res_file=lr_res_file,
                 output_request=output_request,
-                gid_map=gid_map,
-
+                gid_map=gid_map,
+                nn_map=nn_map,
+                bias_correct=bias_correct,
             )
 
         except Exception as e:
             out = {}
-            logger.exception(
+            logger.exception("Worker failed for PC: {}".format(points_control))
             raise e
 
         if scale_outputs:
@@ -661,8 +713,8 @@ class Gen(BaseGen):
                 # iterate through variable names in each site's output dict
                 if k in cls.OUT_ATTRS:
                     # get dtype and scale for output variable name
-                    dtype = cls.OUT_ATTRS[k].get(
-                    scale_factor = cls.OUT_ATTRS[k].get(
+                    dtype = cls.OUT_ATTRS[k].get("dtype", "float32")
+                    scale_factor = cls.OUT_ATTRS[k].get("scale_factor", 1)
 
                     # apply scale factor and dtype
                     out[site][k] *= scale_factor
@@ -676,8 +728,9 @@ class Gen(BaseGen):
                         out[site][k] = out[site][k].astype(dtype)
                     else:
                         # use numpy array conversion for scalar values
-                        out[site][k] = np.array(
-
+                        out[site][k] = np.array(
+                            [out[site][k]], dtype=dtype
+                        )[0]
 
         return out
 
@@ -701,44 +754,56 @@ class Gen(BaseGen):
         """
 
         if isinstance(gid_map, str):
-            if gid_map.endswith(
+            if gid_map.endswith(".csv"):
                 gid_map = pd.read_csv(gid_map).to_dict()
-
-                assert
-                gid_map
-
+                msg = f"Need {ResourceMetaField.GID} in gid_map column"
+                assert ResourceMetaField.GID in gid_map, msg
+                assert "gid_map" in gid_map, 'Need "gid_map" in gid_map column'
+                gid_map = {
+                    gid_map[ResourceMetaField.GID][i]: gid_map["gid_map"][i]
+                    for i in gid_map[ResourceMetaField.GID].keys()
+                }
 
-            elif gid_map.endswith(
-                with open(gid_map
+            elif gid_map.endswith(".json"):
+                with open(gid_map) as f:
                     gid_map = json.load(f)
 
         if isinstance(gid_map, dict):
             if not self._multi_h5_res:
                 res_cls = Resource
-                kwargs = {
+                kwargs = {"hsds": self._hsds}
            else:
                 res_cls = MultiFileResource
                 kwargs = {}
 
            with res_cls(self.res_file, **kwargs) as res:
                for gen_gid, res_gid in gid_map.items():
-                    msg1 = (
-
-
-
-
+                    msg1 = (
+                        "gid_map values must all be int but received "
+                        "{}: {}".format(gen_gid, res_gid)
+                    )
+                    msg2 = (
+                        "Could not find the gen_gid to res_gid mapping "
+                        "{}: {} in the resource meta data.".format(
+                            gen_gid, res_gid
+                        )
+                    )
                     assert isinstance(gen_gid, int), msg1
                     assert isinstance(res_gid, int), msg1
                     assert res_gid in res.meta.index.values, msg2
 
                 for gen_gid in self.project_points.sites:
-                    msg3 = (
-
+                    msg3 = (
+                        "Could not find the project points gid {} in the "
+                        "gen_gid input of the gid_map.".format(gen_gid)
+                    )
                     assert gen_gid in gid_map, msg3
 
         elif gid_map is not None:
-            msg = (
-
+            msg = (
+                "Could not parse gid_map, must be None, dict, or path to "
+                "csv or json, but received: {}".format(gid_map)
+            )
             logger.error(msg)
             raise InputError(msg)
 
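Note: the reworked `_parse_gid_map` in the hunk above accepts the mapping as a dict, a `.csv` file, or a `.json` file, relating each project-point (generation) gid to the resource gid it should read data from. A minimal sketch of preparing such an input, assuming `ResourceMetaField.GID` resolves to the column name `"gid"` (the gid values and file name below are hypothetical illustrations, not taken from this diff):

    import pandas as pd

    # Keys are generation gids (project points), values are resource gids;
    # both must be ints per the assertions in _parse_gid_map above.
    gid_map = {3: 10, 4: 10, 7: 11, 9: 12}

    # Equivalent .csv layout: one "gid" column and one "gid_map" column
    # (the "gid" name is assumed from ResourceMetaField.GID).
    pd.DataFrame(
        {"gid": list(gid_map), "gid_map": list(gid_map.values())}
    ).to_csv("gid_map.csv", index=False)

Either the dict itself or the path to the csv (or an equivalent .json mapping) can then be passed to `Gen` through the `gid_map` argument shown in the constructor earlier in this diff.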
@@ -758,24 +823,32 @@ class Gen(BaseGen):
         """
         nn_map = None
         if self.lr_res_file is not None:
-
             handler_class = Resource
-            if
+            if "*" in self.res_file or "*" in self.lr_res_file:
                 handler_class = MultiFileResource
 
-            with handler_class(self.res_file) as hr_res
-
-
-
-
-
-
-
+            with handler_class(self.res_file) as hr_res, handler_class(
+                self.lr_res_file
+            ) as lr_res:
+                logger.info(
+                    "Making nearest neighbor map for multi "
+                    "resolution resource data..."
+                )
+                nn_d, nn_map = MultiResolutionResource.make_nn_map(
+                    hr_res, lr_res
+                )
+                logger.info(
+                    "Done making nearest neighbor map for multi "
+                    "resolution resource data!"
+                )
 
-            logger.info(
-
-
-
+            logger.info(
+                "Made nearest neighbor mapping between nominal-"
+                "resolution and low-resolution resource files. "
+                "Min / mean / max dist: {:.3f} / {:.3f} / {:.3f}".format(
+                    nn_d.min(), nn_d.mean(), nn_d.max()
+                )
+            )
 
         return nn_map
 
@@ -829,23 +902,34 @@ class Gen(BaseGen):
         if isinstance(bias_correct, type(None)):
             return bias_correct
 
-
-            bias_correct = pd.read_csv(bias_correct)
+        if isinstance(bias_correct, str):
+            bias_correct = pd.read_csv(bias_correct).rename(
+                SupplyCurveField.map_to(ResourceMetaField), axis=1
+            )
 
-        msg = (
-
+        msg = (
+            "Bias correction data must be a filepath to csv or a dataframe "
+            "but received: {}".format(type(bias_correct))
+        )
         assert isinstance(bias_correct, pd.DataFrame), msg
 
-        msg = (
-
-
+        msg = (
+            "Bias correction table must have {!r} column but only found: "
+            "{}".format(ResourceMetaField.GID, list(bias_correct.columns))
+        )
+        assert (
+            ResourceMetaField.GID in bias_correct
+            or bias_correct.index.name == ResourceMetaField.GID
+        ), msg
 
-        if bias_correct.index.name !=
-            bias_correct = bias_correct.set_index(
+        if bias_correct.index.name != ResourceMetaField.GID:
+            bias_correct = bias_correct.set_index(ResourceMetaField.GID)
 
-        msg = (
-
-
+        msg = (
+            'Bias correction table must have "method" column but only '
+            "found: {}".format(list(bias_correct.columns))
+        )
+        assert "method" in bias_correct, msg
 
         return bias_correct
 
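Note: the bias-correction parsing above only requires that the input be a DataFrame (or a path to a csv), carry the resource gid as a column or index, and include a "method" column. A minimal sketch of a conforming table; the `"gid"` name is assumed from `ResourceMetaField.GID`, and the method name and parameter columns are hypothetical illustrations rather than values taken from this diff:

    import pandas as pd

    # One row per resource gid; extra columns are parameters consumed by the
    # named bias correction method (names here are placeholders).
    bias_correct = pd.DataFrame(
        {
            "gid": [10, 11, 12],           # resource gids ("gid" assumed)
            "method": ["lin_ws"] * 3,      # hypothetical method name
            "scalar": [0.98, 1.02, 1.00],  # hypothetical parameter column
            "adder": [0.0, 0.1, -0.1],     # hypothetical parameter column
        }
    )
    # _parse_bc sets the gid column as the index if it is not already.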
@@ -866,13 +950,15 @@ class Gen(BaseGen):
         output_request = self._output_request_type_check(req)
 
         # ensure that cf_mean is requested from output
-        if
-            output_request.append(
+        if "cf_mean" not in output_request:
+            output_request.append("cf_mean")
 
         for request in output_request:
             if request not in self.OUT_ATTRS:
-                msg = (
-
+                msg = (
+                    'User output request "{}" not recognized. '
+                    "Will attempt to extract from PySAM.".format(request)
+                )
                 logger.debug(msg)
 
         return list(set(output_request))
@@ -896,20 +982,19 @@ class Gen(BaseGen):
         """
 
         gids = pc.project_points.gids
-        gid_map = kwargs.get(
-        bias_correct = kwargs.get(
+        gid_map = kwargs.get("gid_map", None)
+        bias_correct = kwargs.get("bias_correct", None)
 
         if bias_correct is not None:
             if gid_map is not None:
                 gids = [gid_map[gid] for gid in gids]
 
             mask = bias_correct.index.isin(gids)
-            kwargs[
+            kwargs["bias_correct"] = bias_correct[mask]
 
         return kwargs
 
-    def run(self, out_fpath=None, max_workers=1, timeout=1800,
-            pool_size=None):
+    def run(self, out_fpath=None, max_workers=1, timeout=1800, pool_size=None):
         """Execute a parallel reV generation run with smart data flushing.
 
         Parameters
@@ -947,45 +1032,68 @@ class Gen(BaseGen):
         if pool_size is None:
             pool_size = os.cpu_count() * 2
 
-        kwargs = {
-
-
-
-
-
-
-
-
-
-
-        logger.
-
-
-
-
-
+        kwargs = {
+            "tech": self.tech,
+            "res_file": self.res_file,
+            "lr_res_file": self.lr_res_file,
+            "output_request": self.output_request,
+            "scale_outputs": self.scale_outputs,
+            "gid_map": self._gid_map,
+            "nn_map": self._nn_map,
+            "bias_correct": self._bc,
+        }
+
+        logger.info(
+            "Running reV generation for: {}".format(self.points_control)
+        )
+        logger.debug(
+            'The following project points were specified: "{}"'.format(
+                self.project_points
+            )
+        )
+        logger.debug(
+            "The following SAM configs are available to this run:\n{}".format(
+                pprint.pformat(self.sam_configs, indent=4)
+            )
+        )
+        logger.debug(
+            "The SAM output variables have been requested:\n{}".format(
+                self.output_request
+            )
+        )
 
         # use serial or parallel execution control based on max_workers
         try:
             if max_workers == 1:
-                logger.debug(
-
+                logger.debug(
+                    "Running serial generation for: {}".format(
+                        self.points_control
+                    )
+                )
                 for i, pc_sub in enumerate(self.points_control):
                     self.out = self._run_single_worker(pc_sub, **kwargs)
-                    logger.info(
-
-
-
+                    logger.info(
+                        "Finished reV gen serial compute for: {} "
+                        "(iteration {} out of {})".format(
+                            pc_sub, i + 1, len(self.points_control)
+                        )
+                    )
                     self.flush()
             else:
-                logger.debug(
-
-
-
-
+                logger.debug(
+                    "Running parallel generation for: {}".format(
+                        self.points_control
+                    )
+                )
+                self._parallel_run(
+                    max_workers=max_workers,
+                    pool_size=pool_size,
+                    timeout=timeout,
+                    **kwargs,
+                )
 
         except Exception as e:
-            logger.exception(
+            logger.exception("reV generation failed!")
             raise e
 
         return self._out_fpath
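Note: taken together with the class docstring example earlier in this diff, the updated constructor and `run` signature can be exercised roughly as follows. This is a minimal sketch, not a definitive recipe; the resource and SAM configuration paths are hypothetical placeholders and must point at real NSRDB/WTK data and a valid SAM config for the run to succeed:

    from reV.generation.generation import Gen

    fp_res = "/path/to/nsrdb_2013.h5"        # hypothetical resource file
    fp_sam = "/path/to/pvwatts_config.json"  # hypothetical SAM config

    # Positional order matches the reworked __init__ above:
    # technology, project_points, sam_files, resource_file.
    gen = Gen(
        "pvwattsv8",
        [3, 4, 7, 9],
        fp_sam,
        fp_res,
        output_request=("cf_mean", "lcoe_fcr"),
    )
    gen.run(max_workers=1)
    print(gen.out["cf_mean"])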