NREL-reV 0.8.6__py3-none-any.whl → 0.8.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/METADATA +12 -10
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/RECORD +38 -38
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/WHEEL +1 -1
- reV/SAM/SAM.py +182 -133
- reV/SAM/econ.py +18 -14
- reV/SAM/generation.py +640 -414
- reV/SAM/windbos.py +93 -79
- reV/bespoke/bespoke.py +690 -445
- reV/bespoke/place_turbines.py +6 -6
- reV/config/project_points.py +220 -140
- reV/econ/econ.py +165 -113
- reV/econ/economies_of_scale.py +57 -34
- reV/generation/base.py +310 -183
- reV/generation/generation.py +309 -191
- reV/handlers/exclusions.py +16 -15
- reV/handlers/multi_year.py +12 -9
- reV/handlers/outputs.py +6 -5
- reV/hybrids/hybrid_methods.py +28 -30
- reV/hybrids/hybrids.py +304 -188
- reV/nrwal/nrwal.py +262 -168
- reV/qa_qc/cli_qa_qc.py +14 -10
- reV/qa_qc/qa_qc.py +217 -119
- reV/qa_qc/summary.py +228 -146
- reV/rep_profiles/rep_profiles.py +349 -230
- reV/supply_curve/aggregation.py +349 -188
- reV/supply_curve/competitive_wind_farms.py +90 -48
- reV/supply_curve/exclusions.py +138 -85
- reV/supply_curve/extent.py +75 -50
- reV/supply_curve/points.py +620 -295
- reV/supply_curve/sc_aggregation.py +396 -226
- reV/supply_curve/supply_curve.py +505 -308
- reV/supply_curve/tech_mapping.py +144 -82
- reV/utilities/__init__.py +199 -16
- reV/utilities/pytest_utils.py +8 -4
- reV/version.py +1 -1
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/LICENSE +0 -0
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/entry_points.txt +0 -0
- {NREL_reV-0.8.6.dist-info → NREL_reV-0.8.9.dist-info}/top_level.txt +0 -0
reV/generation/generation.py
CHANGED
@@ -2,6 +2,7 @@
 """
 reV generation module.
 """
+
 import copy
 import json
 import logging
@@ -10,42 +11,46 @@ import pprint

 import numpy as np
 import pandas as pd
-
-from reV.generation.base import BaseGen
-from reV.SAM.generation import (Geothermal,
-                                MhkWave,
-                                LinearDirectSteam,
-                                PvSamv1,
-                                PvWattsv5,
-                                PvWattsv7,
-                                PvWattsv8,
-                                SolarWaterHeat,
-                                TcsMoltenSalt,
-                                TroughPhysicalHeat,
-                                WindPower)
-from reV.utilities import ModuleName
-from reV.utilities.exceptions import (ConfigError,
-                                      InputError,
-                                      ProjectPointsValueError)
 from rex.multi_file_resource import MultiFileResource
 from rex.multi_res_resource import MultiResolutionResource
 from rex.resource import Resource
 from rex.utilities.utilities import check_res_file

+from reV.generation.base import BaseGen
+from reV.SAM.generation import (
+    Geothermal,
+    LinearDirectSteam,
+    MhkWave,
+    PvSamv1,
+    PvWattsv5,
+    PvWattsv7,
+    PvWattsv8,
+    SolarWaterHeat,
+    TcsMoltenSalt,
+    TroughPhysicalHeat,
+    WindPower,
+)
+from reV.utilities import ModuleName, ResourceMetaField, SupplyCurveField
+from reV.utilities.exceptions import (
+    ConfigError,
+    InputError,
+    ProjectPointsValueError,
+)
+
 logger = logging.getLogger(__name__)


 ATTR_DIR = os.path.dirname(os.path.realpath(__file__))
-ATTR_DIR = os.path.join(ATTR_DIR,
-with open(os.path.join(ATTR_DIR,
+ATTR_DIR = os.path.join(ATTR_DIR, "output_attributes")
+with open(os.path.join(ATTR_DIR, "other.json")) as f:
     OTHER_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR,
+with open(os.path.join(ATTR_DIR, "generation.json")) as f:
     GEN_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR,
+with open(os.path.join(ATTR_DIR, "linear_fresnel.json")) as f:
     LIN_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR,
+with open(os.path.join(ATTR_DIR, "solar_water_heat.json")) as f:
     SWH_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR,
+with open(os.path.join(ATTR_DIR, "trough_heat.json")) as f:
     TPPH_ATTRS = json.load(f)


@@ -53,17 +58,19 @@ class Gen(BaseGen):
     """Gen"""

     # Mapping of reV technology strings to SAM generation objects
-    OPTIONS = {
-
-
-
-
-
-
-
-
-
-
+    OPTIONS = {
+        "geothermal": Geothermal,
+        "lineardirectsteam": LinearDirectSteam,
+        "mhkwave": MhkWave,
+        "pvsamv1": PvSamv1,
+        "pvwattsv5": PvWattsv5,
+        "pvwattsv7": PvWattsv7,
+        "pvwattsv8": PvWattsv8,
+        "solarwaterheat": SolarWaterHeat,
+        "tcsmoltensalt": TcsMoltenSalt,
+        "troughphysicalheat": TroughPhysicalHeat,
+        "windpower": WindPower,
+    }

     """reV technology options."""

@@ -76,13 +83,25 @@ class Gen(BaseGen):
     OUT_ATTRS.update(TPPH_ATTRS)
     OUT_ATTRS.update(BaseGen.ECON_ATTRS)

-    def __init__(
-
-
-
-
-
-
+    def __init__(
+        self,
+        technology,
+        project_points,
+        sam_files,
+        resource_file,
+        low_res_resource_file=None,
+        output_request=("cf_mean",),
+        site_data=None,
+        curtailment=None,
+        gid_map=None,
+        drop_leap=False,
+        sites_per_worker=None,
+        memory_utilization_limit=0.4,
+        scale_outputs=True,
+        write_mapped_gids=False,
+        bias_correct=None,
+    ):
+        """ReV generation analysis class.

         ``reV`` generation analysis runs SAM simulations by piping in
         renewable energy resource data (usually from the NSRDB or WTK),
@@ -121,14 +140,14 @@ class Gen(BaseGen):
         {'cf_mean': array([0.16966143], dtype=float32)}
         >>>
         >>> sites = [3, 4, 7, 9]
-        >>> req = ('cf_mean', '
+        >>> req = ('cf_mean', 'lcoe_fcr')
         >>> gen = Gen(sam_tech, sites, fp_sam, fp_res, output_request=req)
         >>> gen.run()
         >>>
         >>> gen.out
         {'lcoe_fcr': array([131.39166, 131.31221, 127.54539, 125.49656]),
-
-
+        'cf_mean': array([0.17713654, 0.17724372, 0.1824783 , 0.1854574 ]),
+        : array([[0., 0., 0., 0.],
         [0., 0., 0., 0.],
         [0., 0., 0., 0.],
         ...,
@@ -289,7 +308,17 @@
             site-specific values. Note that some or all site-specific
             inputs can be specified via the `project_points` input
             table instead. If ``None``, no site-specific data is
-            considered.
+            considered.
+
+            .. Note:: This input is often used to provide site-based
+               regional capital cost multipliers. ``reV`` does not
+               ingest multipliers directly; instead, this file is
+               expected to have a ``capital_cost`` column that gives the
+               multiplier-adjusted capital cost value for each location.
+               Therefore, you *must* re-create this input file every
+               time you change your base capital cost assumption.
+
+            By default, ``None``.
         curtailment : dict | str, optional
             Inputs for curtailment parameters, which can be:

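The convention described in the new note can be illustrated with a small, hypothetical `site_data` table. This is a hedged sketch only: the `gid` column name, the multipliers, and the base cost are placeholders, not reV defaults.

```python
import pandas as pd

# Hedged sketch of the workflow described in the note above: regional capital
# cost multipliers are baked into a "capital_cost" column before the table is
# passed to reV as ``site_data``. Column names and numbers are illustrative.
base_capital_cost = 39_000_000  # hypothetical base capital cost assumption ($)

site_data = pd.DataFrame(
    {
        "gid": [3, 4, 7, 9],  # project points gids
        "capital_cost_multiplier": [1.00, 1.05, 1.12, 0.97],
    }
)

# reV reads the already-adjusted value, so apply the multiplier yourself...
site_data["capital_cost"] = (
    base_capital_cost * site_data["capital_cost_multiplier"]
)
# ...and regenerate this file whenever base_capital_cost changes.
site_data.to_csv("site_data.csv", index=False)
```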
@@ -351,22 +380,33 @@
             ``bias_correct`` table on a site-by-site basis. If ``None``, no
             corrections are applied. By default, ``None``.
         """
-        pc = self.get_pc(
-
-
-
-
-
-
-
-
-
+        pc = self.get_pc(
+            points=project_points,
+            points_range=None,
+            sam_configs=sam_files,
+            tech=technology,
+            sites_per_worker=sites_per_worker,
+            res_file=resource_file,
+            curtailment=curtailment,
+        )
+
+        super().__init__(
+            pc,
+            output_request,
+            site_data=site_data,
+            drop_leap=drop_leap,
+            memory_utilization_limit=memory_utilization_limit,
+            scale_outputs=scale_outputs,
+        )

         if self.tech not in self.OPTIONS:
-            msg = (
-
-
-
+            msg = (
+                'Requested technology "{}" is not available. '
+                "reV generation can analyze the following "
+                "SAM technologies: {}".format(
+                    self.tech, list(self.OPTIONS.keys())
+                )
+            )
             logger.error(msg)
             raise KeyError(msg)

@@ -374,8 +414,8 @@
         self._res_file = resource_file
         self._lr_res_file = low_res_resource_file
         self._sam_module = self.OPTIONS[self.tech]
-        self._run_attrs[
-        self._run_attrs[
+        self._run_attrs["sam_module"] = self._sam_module.MODULE
+        self._run_attrs["res_file"] = resource_file

         self._multi_h5_res, self._hsds = check_res_file(resource_file)
         self._gid_map = self._parse_gid_map(gid_map)
@@ -414,11 +454,12 @@
             Meta data df for sites in project points. Column names are meta
             data variables, rows are different sites. The row index
             does not indicate the site number if the project points are
-            non-sequential or do not start from 0, so a
+            non-sequential or do not start from 0, so a `SupplyCurveField.GID`
+            column is added.
         """
         if self._meta is None:
             res_cls = Resource
-            kwargs = {
+            kwargs = {"hsds": self._hsds}
             if self._multi_h5_res:
                 res_cls = MultiFileResource
                 kwargs = {}
@@ -428,25 +469,28 @@
                 res_gids = [self._gid_map[i] for i in res_gids]

             with res_cls(self.res_file, **kwargs) as res:
-                meta_len = res.shapes[
+                meta_len = res.shapes["meta"][0]

                 if np.max(res_gids) > meta_len:
-                    msg = (
-
-
-
-
+                    msg = (
+                        "ProjectPoints has a max site gid of {} which is "
+                        "out of bounds for the meta data of len {} from "
+                        "resource file: {}".format(
+                            np.max(res_gids), meta_len, self.res_file
+                        )
+                    )
                     logger.error(msg)
                     raise ProjectPointsValueError(msg)

-                self._meta = res[
+                self._meta = res["meta", res_gids]

-            self._meta.loc[:,
+            self._meta.loc[:, ResourceMetaField.GID] = res_gids
             if self.write_mapped_gids:
-
+                sites = self.project_points.sites
+                self._meta.loc[:, ResourceMetaField.GID] = sites
             self._meta.index = self.project_points.sites
-            self._meta.index.name =
-            self._meta.loc[:,
+            self._meta.index.name = ResourceMetaField.GID
+            self._meta.loc[:, "reV_tech"] = self.project_points.tech

         return self._meta

@@ -462,7 +506,7 @@
         if self._time_index is None:
             if not self._multi_h5_res:
                 res_cls = Resource
-                kwargs = {
+                kwargs = {"hsds": self._hsds}
             else:
                 res_cls = MultiFileResource
                 kwargs = {}
@@ -474,19 +518,22 @@
             step = self.project_points.sam_config_obj.time_index_step
             if downscale is not None:
                 from rex.utilities.downscale import make_time_index
+
                 year = time_index.year[0]
-                ds_freq = downscale[
+                ds_freq = downscale["frequency"]
                 time_index = make_time_index(year, ds_freq)
-                logger.info(
-
-
-
+                logger.info(
+                    "reV solar generation running with temporal "
+                    'downscaling frequency "{}" with final '
+                    "time_index length {}".format(ds_freq, len(time_index))
+                )
             elif step is not None:
                 time_index = time_index[::step]

             time_index = self.handle_lifetime_index(time_index)
-            time_index = self.handle_leap_ti(
-
+            time_index = self.handle_leap_ti(
+                time_index, drop_leap=self._drop_leap
+            )

             self._time_index = time_index

@@ -520,30 +567,32 @@
         # Only one time index may be passed, check that lifetime periods match
         n_unique_periods = len(np.unique(lifetime_periods))
         if n_unique_periods != 1:
-            msg = (
-
-
-
+            msg = (
+                "reV cannot handle multiple analysis_periods when "
+                "modeling with `system_use_lifetime_output` set "
+                "to 1. Found {} different analysis_periods in the SAM "
+                "configs".format(n_unique_periods)
+            )
             logger.error(msg)
             raise ConfigError(msg)

         # Collect requested variables to check for lifetime compatibility
         array_vars = [
-            var for var, attrs in GEN_ATTRS.items()
-            if attrs['type'] == 'array'
+            var for var, attrs in GEN_ATTRS.items() if attrs["type"] == "array"
         ]
-        valid_vars = [
+        valid_vars = ["gen_profile", "cf_profile", "cf_profile_ac"]
         invalid_vars = set(array_vars) - set(valid_vars)
-        invalid_requests = [
-
+        invalid_requests = [
+            var for var in self.output_request if var in invalid_vars
+        ]

         if invalid_requests:
             # SAM does not output full lifetime for all array variables
             msg = (
-
-
-
-
+                "reV can only handle the following output arrays "
+                "when modeling with `system_use_lifetime_output` set "
+                "to 1: {}. Try running without {}.".format(
+                    ", ".join(valid_vars), ", ".join(invalid_requests)
                 )
             )
             logger.error(msg)
@@ -551,8 +600,10 @@

         sam_meta = self.sam_metas[next(iter(self.sam_metas))]
         analysis_period = sam_meta["analysis_period"]
-        logger.info(
-
+        logger.info(
+            "reV generation running with a full system "
+            "life of {} years.".format(analysis_period)
+        )

         old_end = ti[-1]
         new_end = old_end + pd.DateOffset(years=analysis_period - 1)
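The date arithmetic in the last two context lines is easy to sanity-check in isolation. A minimal sketch with placeholder values (the year and `analysis_period` below are assumptions, not values from this package):

```python
import pandas as pd

# Minimal sketch of the offset shown above: push the end of a single-year
# hourly time index out by (analysis_period - 1) years.
old_end = pd.Timestamp("2012-12-31 23:00")  # placeholder ti[-1]
analysis_period = 25  # hypothetical SAM "analysis_period" value

new_end = old_end + pd.DateOffset(years=analysis_period - 1)
print(old_end, "->", new_end)  # 2012-12-31 23:00:00 -> 2036-12-31 23:00:00
```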
@@ -563,10 +614,18 @@
         return ti

     @classmethod
-    def _run_single_worker(
-
-
-
+    def _run_single_worker(
+        cls,
+        points_control,
+        tech=None,
+        res_file=None,
+        lr_res_file=None,
+        output_request=None,
+        scale_outputs=True,
+        gid_map=None,
+        nn_map=None,
+        bias_correct=None,
+    ):
         """Run a SAM generation analysis based on the points_control iterator.

         Parameters
@@ -627,21 +686,24 @@

         # Extract the site df from the project points df.
         site_df = points_control.project_points.df
-        site_df = site_df.set_index(
+        site_df = site_df.set_index(ResourceMetaField.GID, drop=True)

         # run generation method for specified technology
         try:
             out = cls.OPTIONS[tech].reV_run(
-                points_control,
+                points_control,
+                res_file,
+                site_df,
                 lr_res_file=lr_res_file,
                 output_request=output_request,
-                gid_map=gid_map,
-
+                gid_map=gid_map,
+                nn_map=nn_map,
+                bias_correct=bias_correct,
             )

         except Exception as e:
             out = {}
-            logger.exception(
+            logger.exception("Worker failed for PC: {}".format(points_control))
             raise e

         if scale_outputs:
@@ -651,8 +713,8 @@
                     # iterate through variable names in each site's output dict
                     if k in cls.OUT_ATTRS:
                         # get dtype and scale for output variable name
-                        dtype = cls.OUT_ATTRS[k].get(
-                        scale_factor = cls.OUT_ATTRS[k].get(
+                        dtype = cls.OUT_ATTRS[k].get("dtype", "float32")
+                        scale_factor = cls.OUT_ATTRS[k].get("scale_factor", 1)

                         # apply scale factor and dtype
                         out[site][k] *= scale_factor
@@ -666,8 +728,9 @@
                             out[site][k] = out[site][k].astype(dtype)
                         else:
                             # use numpy array conversion for scalar values
-                            out[site][k] = np.array(
-
+                            out[site][k] = np.array(
+                                [out[site][k]], dtype=dtype
+                            )[0]

         return out

@@ -691,44 +754,56 @@
         """

         if isinstance(gid_map, str):
-            if gid_map.endswith(
+            if gid_map.endswith(".csv"):
                 gid_map = pd.read_csv(gid_map).to_dict()
-
-                assert
-                gid_map
-
+                msg = f"Need {ResourceMetaField.GID} in gid_map column"
+                assert ResourceMetaField.GID in gid_map, msg
+                assert "gid_map" in gid_map, 'Need "gid_map" in gid_map column'
+                gid_map = {
+                    gid_map[ResourceMetaField.GID][i]: gid_map["gid_map"][i]
+                    for i in gid_map[ResourceMetaField.GID].keys()
+                }

-            elif gid_map.endswith(
-                with open(gid_map
+            elif gid_map.endswith(".json"):
+                with open(gid_map) as f:
                     gid_map = json.load(f)

         if isinstance(gid_map, dict):
             if not self._multi_h5_res:
                 res_cls = Resource
-                kwargs = {
+                kwargs = {"hsds": self._hsds}
             else:
                 res_cls = MultiFileResource
                 kwargs = {}

             with res_cls(self.res_file, **kwargs) as res:
                 for gen_gid, res_gid in gid_map.items():
-                    msg1 = (
-
-
-
-
+                    msg1 = (
+                        "gid_map values must all be int but received "
+                        "{}: {}".format(gen_gid, res_gid)
+                    )
+                    msg2 = (
+                        "Could not find the gen_gid to res_gid mapping "
+                        "{}: {} in the resource meta data.".format(
+                            gen_gid, res_gid
+                        )
+                    )
                     assert isinstance(gen_gid, int), msg1
                     assert isinstance(res_gid, int), msg1
                     assert res_gid in res.meta.index.values, msg2

             for gen_gid in self.project_points.sites:
-                msg3 = (
-
+                msg3 = (
+                    "Could not find the project points gid {} in the "
+                    "gen_gid input of the gid_map.".format(gen_gid)
+                )
                 assert gen_gid in gid_map, msg3

         elif gid_map is not None:
-            msg = (
-
+            msg = (
+                "Could not parse gid_map, must be None, dict, or path to "
+                "csv or json, but received: {}".format(gid_map)
+            )
             logger.error(msg)
             raise InputError(msg)

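The CSV branch above builds a plain `{gen_gid: res_gid}` dictionary. The same transformation can be reproduced with nothing but pandas; this is a hedged sketch that assumes `ResourceMetaField.GID` renders to the literal column name "gid" and uses made-up gids.

```python
import io

import pandas as pd

# Hedged sketch of the CSV parsing shown above: a table mapping generation
# gids (project points) to resource gids, collapsed into a dict.
csv_text = io.StringIO("gid,gid_map\n0,10\n1,10\n2,11\n")

table = pd.read_csv(csv_text).to_dict()
assert "gid" in table and "gid_map" in table, "need gid and gid_map columns"

gid_map = {table["gid"][i]: table["gid_map"][i] for i in table["gid"]}
print(gid_map)  # {0: 10, 1: 10, 2: 11}
```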
@@ -748,24 +823,32 @@
         """
         nn_map = None
         if self.lr_res_file is not None:
-
             handler_class = Resource
-            if
+            if "*" in self.res_file or "*" in self.lr_res_file:
                 handler_class = MultiFileResource

-            with handler_class(self.res_file) as hr_res
-
-
-
-
-
-
-
+            with handler_class(self.res_file) as hr_res, handler_class(
+                self.lr_res_file
+            ) as lr_res:
+                logger.info(
+                    "Making nearest neighbor map for multi "
+                    "resolution resource data..."
+                )
+                nn_d, nn_map = MultiResolutionResource.make_nn_map(
+                    hr_res, lr_res
+                )
+                logger.info(
+                    "Done making nearest neighbor map for multi "
+                    "resolution resource data!"
+                )

-            logger.info(
-
-
-
+            logger.info(
+                "Made nearest neighbor mapping between nominal-"
+                "resolution and low-resolution resource files. "
+                "Min / mean / max dist: {:.3f} / {:.3f} / {:.3f}".format(
+                    nn_d.min(), nn_d.mean(), nn_d.max()
+                )
+            )

         return nn_map

@@ -819,23 +902,34 @@
         if isinstance(bias_correct, type(None)):
             return bias_correct

-
-        bias_correct = pd.read_csv(bias_correct)
+        if isinstance(bias_correct, str):
+            bias_correct = pd.read_csv(bias_correct).rename(
+                SupplyCurveField.map_to(ResourceMetaField), axis=1
+            )

-        msg = (
-
+        msg = (
+            "Bias correction data must be a filepath to csv or a dataframe "
+            "but received: {}".format(type(bias_correct))
+        )
         assert isinstance(bias_correct, pd.DataFrame), msg

-        msg = (
-
-
+        msg = (
+            "Bias correction table must have {!r} column but only found: "
+            "{}".format(ResourceMetaField.GID, list(bias_correct.columns))
+        )
+        assert (
+            ResourceMetaField.GID in bias_correct
+            or bias_correct.index.name == ResourceMetaField.GID
+        ), msg

-        if bias_correct.index.name !=
-            bias_correct = bias_correct.set_index(
+        if bias_correct.index.name != ResourceMetaField.GID:
+            bias_correct = bias_correct.set_index(ResourceMetaField.GID)

-        msg = (
-
-
+        msg = (
+            'Bias correction table must have "method" column but only '
+            "found: {}".format(list(bias_correct.columns))
+        )
+        assert "method" in bias_correct, msg

         return bias_correct

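A table that would pass the three assertions above can be sketched as follows. This is illustrative only: the literal "gid" column name is an assumption for `ResourceMetaField.GID`, and the method name and extra columns are hypothetical placeholders, not a list of what reV actually supports.

```python
import pandas as pd

# Hedged sketch of a bias-correction table satisfying the checks above:
# a gid column (assumed literal name "gid") plus a required "method" column.
bias_correct = pd.DataFrame(
    {
        "gid": [10, 11, 12],
        "method": ["lin_ws"] * 3,      # hypothetical method name
        "scalar": [1.02, 0.98, 1.00],  # illustrative extra columns
        "adder": [0.0, 0.1, -0.05],
    }
)

assert "gid" in bias_correct or bias_correct.index.name == "gid"
if bias_correct.index.name != "gid":
    bias_correct = bias_correct.set_index("gid")
assert "method" in bias_correct
print(bias_correct)
```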
@@ -856,13 +950,15 @@
         output_request = self._output_request_type_check(req)

         # ensure that cf_mean is requested from output
-        if
-            output_request.append(
+        if "cf_mean" not in output_request:
+            output_request.append("cf_mean")

         for request in output_request:
             if request not in self.OUT_ATTRS:
-                msg = (
-
+                msg = (
+                    'User output request "{}" not recognized. '
+                    "Will attempt to extract from PySAM.".format(request)
+                )
                 logger.debug(msg)

         return list(set(output_request))
@@ -886,20 +982,19 @@
         """

         gids = pc.project_points.gids
-        gid_map = kwargs.get(
-        bias_correct = kwargs.get(
+        gid_map = kwargs.get("gid_map", None)
+        bias_correct = kwargs.get("bias_correct", None)

         if bias_correct is not None:
             if gid_map is not None:
                 gids = [gid_map[gid] for gid in gids]

             mask = bias_correct.index.isin(gids)
-            kwargs[
+            kwargs["bias_correct"] = bias_correct[mask]

         return kwargs

-    def run(self, out_fpath=None, max_workers=1, timeout=1800,
-            pool_size=None):
+    def run(self, out_fpath=None, max_workers=1, timeout=1800, pool_size=None):
         """Execute a parallel reV generation run with smart data flushing.

         Parameters
@@ -937,45 +1032,68 @@
         if pool_size is None:
            pool_size = os.cpu_count() * 2

-        kwargs = {
-
-
-
-
-
-
-
-
-
-
-        logger.
-
-
-
-
-
+        kwargs = {
+            "tech": self.tech,
+            "res_file": self.res_file,
+            "lr_res_file": self.lr_res_file,
+            "output_request": self.output_request,
+            "scale_outputs": self.scale_outputs,
+            "gid_map": self._gid_map,
+            "nn_map": self._nn_map,
+            "bias_correct": self._bc,
+        }
+
+        logger.info(
+            "Running reV generation for: {}".format(self.points_control)
+        )
+        logger.debug(
+            'The following project points were specified: "{}"'.format(
+                self.project_points
+            )
+        )
+        logger.debug(
+            "The following SAM configs are available to this run:\n{}".format(
+                pprint.pformat(self.sam_configs, indent=4)
+            )
+        )
+        logger.debug(
+            "The SAM output variables have been requested:\n{}".format(
+                self.output_request
+            )
+        )

         # use serial or parallel execution control based on max_workers
         try:
             if max_workers == 1:
-                logger.debug(
-
+                logger.debug(
+                    "Running serial generation for: {}".format(
+                        self.points_control
+                    )
+                )
                 for i, pc_sub in enumerate(self.points_control):
                     self.out = self._run_single_worker(pc_sub, **kwargs)
-                    logger.info(
-
-
-
+                    logger.info(
+                        "Finished reV gen serial compute for: {} "
+                        "(iteration {} out of {})".format(
+                            pc_sub, i + 1, len(self.points_control)
+                        )
+                    )
                     self.flush()
             else:
-                logger.debug(
-
-
-
-
+                logger.debug(
+                    "Running parallel generation for: {}".format(
+                        self.points_control
+                    )
+                )
+                self._parallel_run(
+                    max_workers=max_workers,
+                    pool_size=pool_size,
+                    timeout=timeout,
+                    **kwargs,
+                )

         except Exception as e:
-            logger.exception(
+            logger.exception("reV generation failed!")
             raise e

         return self._out_fpath
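Taken together, the `__init__` and `run` signatures visible in this file support a call pattern like the following hedged sketch. File paths and the SAM config are placeholders; the class docstring example earlier in the diff shows the canonical usage.

```python
from reV.generation.generation import Gen

# Hedged end-to-end sketch based on the signatures shown in this diff.
# All file paths below are placeholders and must point at real SAM config
# and resource (e.g. NSRDB/WTK h5) data.
gen = Gen(
    "pvwattsv8",             # must be a key of Gen.OPTIONS
    [3, 4, 7, 9],            # project points (site gids)
    "./pv_sam_config.json",  # sam_files
    "./nsrdb_2012.h5",       # resource_file
    output_request=("cf_mean", "cf_profile"),
)
gen.run(out_fpath="./gen_2012.h5", max_workers=1, timeout=1800, pool_size=None)
print(gen.out["cf_mean"])
```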