NREL-reV 0.8.7__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/METADATA +13 -10
  2. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/RECORD +43 -43
  3. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/WHEEL +1 -1
  4. reV/SAM/SAM.py +217 -133
  5. reV/SAM/econ.py +18 -14
  6. reV/SAM/generation.py +611 -422
  7. reV/SAM/windbos.py +93 -79
  8. reV/bespoke/bespoke.py +681 -377
  9. reV/bespoke/cli_bespoke.py +2 -0
  10. reV/bespoke/place_turbines.py +187 -43
  11. reV/config/output_request.py +2 -1
  12. reV/config/project_points.py +218 -140
  13. reV/econ/econ.py +166 -114
  14. reV/econ/economies_of_scale.py +91 -45
  15. reV/generation/base.py +331 -184
  16. reV/generation/generation.py +326 -200
  17. reV/generation/output_attributes/lcoe_fcr_inputs.json +38 -3
  18. reV/handlers/__init__.py +0 -1
  19. reV/handlers/exclusions.py +16 -15
  20. reV/handlers/multi_year.py +57 -26
  21. reV/handlers/outputs.py +6 -5
  22. reV/handlers/transmission.py +44 -27
  23. reV/hybrids/hybrid_methods.py +30 -30
  24. reV/hybrids/hybrids.py +305 -189
  25. reV/nrwal/nrwal.py +262 -168
  26. reV/qa_qc/cli_qa_qc.py +14 -10
  27. reV/qa_qc/qa_qc.py +217 -119
  28. reV/qa_qc/summary.py +228 -146
  29. reV/rep_profiles/rep_profiles.py +349 -230
  30. reV/supply_curve/aggregation.py +349 -188
  31. reV/supply_curve/competitive_wind_farms.py +90 -48
  32. reV/supply_curve/exclusions.py +138 -85
  33. reV/supply_curve/extent.py +75 -50
  34. reV/supply_curve/points.py +735 -390
  35. reV/supply_curve/sc_aggregation.py +357 -248
  36. reV/supply_curve/supply_curve.py +604 -347
  37. reV/supply_curve/tech_mapping.py +144 -82
  38. reV/utilities/__init__.py +274 -16
  39. reV/utilities/pytest_utils.py +8 -4
  40. reV/version.py +1 -1
  41. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/LICENSE +0 -0
  42. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/entry_points.txt +0 -0
  43. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/top_level.txt +0 -0
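The hunks reproduced below are from reV/generation/generation.py (item 16 in the list above). The most visible API shift in this file is the move away from hard-coded meta-data column names such as 'gid' toward the ResourceMetaField and SupplyCurveField enums now exported from reV.utilities. The following is a minimal, illustrative sketch of that pattern only; it is not code from the package, it assumes reV >= 0.9.0, that ResourceMetaField.GID behaves as a string-valued enum member equal to "gid", and the sample gids and coordinates are hypothetical.

# Illustrative sketch (not from the package) of the meta-column handling seen
# in Gen.meta below: the hard-coded "gid" string is replaced by the enum.
import pandas as pd

from reV.utilities import ResourceMetaField  # new export in 0.9.0 per the diff

res_gids = [3, 4, 7, 9]  # hypothetical resource gids for the modeled sites
meta = pd.DataFrame({"latitude": [41.2, 41.3, 41.4, 41.5],
                     "longitude": [-71.5, -71.6, -71.7, -71.8]})

# same pattern as Gen.meta: tag each row with its resource gid and use the
# enum member (not a bare string) as the column and index name
meta.loc[:, ResourceMetaField.GID] = res_gids
meta.index = res_gids
meta.index.name = ResourceMetaField.GID

print(meta.columns.tolist())  # ['latitude', 'longitude', 'gid'] if GID == "gid"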
@@ -2,6 +2,7 @@
 """
 reV generation module.
 """
+
 import copy
 import json
 import logging
@@ -10,42 +11,46 @@ import pprint
 
 import numpy as np
 import pandas as pd
-
-from reV.generation.base import BaseGen
-from reV.SAM.generation import (Geothermal,
-                                MhkWave,
-                                LinearDirectSteam,
-                                PvSamv1,
-                                PvWattsv5,
-                                PvWattsv7,
-                                PvWattsv8,
-                                SolarWaterHeat,
-                                TcsMoltenSalt,
-                                TroughPhysicalHeat,
-                                WindPower)
-from reV.utilities import ModuleName
-from reV.utilities.exceptions import (ConfigError,
-                                      InputError,
-                                      ProjectPointsValueError)
 from rex.multi_file_resource import MultiFileResource
 from rex.multi_res_resource import MultiResolutionResource
 from rex.resource import Resource
 from rex.utilities.utilities import check_res_file
 
+from reV.generation.base import BaseGen
+from reV.SAM.generation import (
+    Geothermal,
+    LinearDirectSteam,
+    MhkWave,
+    PvSamv1,
+    PvWattsv5,
+    PvWattsv7,
+    PvWattsv8,
+    SolarWaterHeat,
+    TcsMoltenSalt,
+    TroughPhysicalHeat,
+    WindPower,
+)
+from reV.utilities import ModuleName, ResourceMetaField, SupplyCurveField
+from reV.utilities.exceptions import (
+    ConfigError,
+    InputError,
+    ProjectPointsValueError,
+)
+
 logger = logging.getLogger(__name__)
 
 
 ATTR_DIR = os.path.dirname(os.path.realpath(__file__))
-ATTR_DIR = os.path.join(ATTR_DIR, 'output_attributes')
-with open(os.path.join(ATTR_DIR, 'other.json'), 'r') as f:
+ATTR_DIR = os.path.join(ATTR_DIR, "output_attributes")
+with open(os.path.join(ATTR_DIR, "other.json")) as f:
    OTHER_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR, 'generation.json'), 'r') as f:
+with open(os.path.join(ATTR_DIR, "generation.json")) as f:
    GEN_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR, 'linear_fresnel.json'), 'r') as f:
+with open(os.path.join(ATTR_DIR, "linear_fresnel.json")) as f:
    LIN_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR, 'solar_water_heat.json'), 'r') as f:
+with open(os.path.join(ATTR_DIR, "solar_water_heat.json")) as f:
    SWH_ATTRS = json.load(f)
-with open(os.path.join(ATTR_DIR, 'trough_heat.json'), 'r') as f:
+with open(os.path.join(ATTR_DIR, "trough_heat.json")) as f:
    TPPH_ATTRS = json.load(f)
 
 
@@ -53,17 +58,19 @@ class Gen(BaseGen):
     """Gen"""
 
     # Mapping of reV technology strings to SAM generation objects
-    OPTIONS = {'geothermal': Geothermal,
-               'lineardirectsteam': LinearDirectSteam,
-               'mhkwave': MhkWave,
-               'pvsamv1': PvSamv1,
-               'pvwattsv5': PvWattsv5,
-               'pvwattsv7': PvWattsv7,
-               'pvwattsv8': PvWattsv8,
-               'solarwaterheat': SolarWaterHeat,
-               'tcsmoltensalt': TcsMoltenSalt,
-               'troughphysicalheat': TroughPhysicalHeat,
-               'windpower': WindPower}
+    OPTIONS = {
+        "geothermal": Geothermal,
+        "lineardirectsteam": LinearDirectSteam,
+        "mhkwave": MhkWave,
+        "pvsamv1": PvSamv1,
+        "pvwattsv5": PvWattsv5,
+        "pvwattsv7": PvWattsv7,
+        "pvwattsv8": PvWattsv8,
+        "solarwaterheat": SolarWaterHeat,
+        "tcsmoltensalt": TcsMoltenSalt,
+        "troughphysicalheat": TroughPhysicalHeat,
+        "windpower": WindPower,
+    }
 
     """reV technology options."""
 
@@ -76,13 +83,25 @@ class Gen(BaseGen):
     OUT_ATTRS.update(TPPH_ATTRS)
     OUT_ATTRS.update(BaseGen.ECON_ATTRS)
 
-    def __init__(self, technology, project_points, sam_files, resource_file,
-                 low_res_resource_file=None, output_request=('cf_mean',),
-                 site_data=None, curtailment=None, gid_map=None,
-                 drop_leap=False, sites_per_worker=None,
-                 memory_utilization_limit=0.4, scale_outputs=True,
-                 write_mapped_gids=False, bias_correct=None):
-        """reV generation analysis class.
+    def __init__(
+        self,
+        technology,
+        project_points,
+        sam_files,
+        resource_file,
+        low_res_resource_file=None,
+        output_request=("cf_mean",),
+        site_data=None,
+        curtailment=None,
+        gid_map=None,
+        drop_leap=False,
+        sites_per_worker=None,
+        memory_utilization_limit=0.4,
+        scale_outputs=True,
+        write_mapped_gids=False,
+        bias_correct=None,
+    ):
+        """ReV generation analysis class.
 
         ``reV`` generation analysis runs SAM simulations by piping in
         renewable energy resource data (usually from the NSRDB or WTK),
@@ -94,7 +113,7 @@ class Gen(BaseGen):
         allowed and/or required SAM config file inputs. If economic
         parameters are supplied in the SAM config, then you can bundle a
         "follow-on" econ calculation by just adding the desired econ
-        output keys to the `output_request`. You can request ``reV`` to '
+        output keys to the `output_request`. You can request ``reV`` to
         run the analysis for one or more "sites", which correspond to
         the meta indices in the resource data (also commonly called the
         ``gid's``).
@@ -109,7 +128,7 @@ class Gen(BaseGen):
         >>> import os
         >>> from reV import Gen, TESTDATADIR
         >>>
-        >>> sam_tech = 'pvwattsv7'
+        >>> sam_tech = 'pvwattsv8'
         >>> sites = 0
         >>> fp_sam = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13.json')
         >>> fp_res = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2013.h5')
@@ -121,20 +140,21 @@ class Gen(BaseGen):
         {'cf_mean': array([0.16966143], dtype=float32)}
         >>>
         >>> sites = [3, 4, 7, 9]
-        >>> req = ('cf_mean', 'cf_profile', 'lcoe_fcr')
+        >>> req = ('cf_mean', 'lcoe_fcr')
         >>> gen = Gen(sam_tech, sites, fp_sam, fp_res, output_request=req)
         >>> gen.run()
         >>>
         >>> gen.out
-        {'lcoe_fcr': array([131.39166, 131.31221, 127.54539, 125.49656]),
-        'cf_mean': array([0.17713654, 0.17724372, 0.1824783 , 0.1854574 ]),
-        'cf_profile': array([[0., 0., 0., 0.],
-                [0., 0., 0., 0.],
-                [0., 0., 0., 0.],
-                ...,
-                [0., 0., 0., 0.],
-                [0., 0., 0., 0.],
-                [0., 0., 0., 0.]])}
+        {'fixed_charge_rate': array([0.096, 0.096, 0.096, 0.096],
+        'base_capital_cost': array([39767200, 39767200, 39767200, 39767200],
+        'base_variable_operating_cost': array([0, 0, 0, 0],
+        'base_fixed_operating_cost': array([260000, 260000, 260000, 260000],
+        'capital_cost': array([39767200, 39767200, 39767200, 39767200],
+        'fixed_operating_cost': array([260000, 260000, 260000, 260000],
+        'variable_operating_cost': array([0, 0, 0, 0],
+        'capital_cost_multiplier': array([1, 1, 1, 1],
+        'cf_mean': array([0.17859147, 0.17869979, 0.1834818 , 0.18646291],
+        'lcoe_fcr': array([130.32126, 130.24226, 126.84782, 124.81981]}
 
         Parameters
         ----------
@@ -361,22 +381,33 @@ class Gen(BaseGen):
             ``bias_correct`` table on a site-by-site basis. If ``None``, no
             corrections are applied. By default, ``None``.
         """
-        pc = self.get_pc(points=project_points, points_range=None,
-                         sam_configs=sam_files, tech=technology,
-                         sites_per_worker=sites_per_worker,
-                         res_file=resource_file,
-                         curtailment=curtailment)
-
-        super().__init__(pc, output_request, site_data=site_data,
-                         drop_leap=drop_leap,
-                         memory_utilization_limit=memory_utilization_limit,
-                         scale_outputs=scale_outputs)
+        pc = self.get_pc(
+            points=project_points,
+            points_range=None,
+            sam_configs=sam_files,
+            tech=technology,
+            sites_per_worker=sites_per_worker,
+            res_file=resource_file,
+            curtailment=curtailment,
+        )
+
+        super().__init__(
+            pc,
+            output_request,
+            site_data=site_data,
+            drop_leap=drop_leap,
+            memory_utilization_limit=memory_utilization_limit,
+            scale_outputs=scale_outputs,
+        )
 
         if self.tech not in self.OPTIONS:
-            msg = ('Requested technology "{}" is not available. '
-                   'reV generation can analyze the following '
-                   'SAM technologies: {}'
-                   .format(self.tech, list(self.OPTIONS.keys())))
+            msg = (
+                'Requested technology "{}" is not available. '
+                "reV generation can analyze the following "
+                "SAM technologies: {}".format(
+                    self.tech, list(self.OPTIONS.keys())
+                )
+            )
             logger.error(msg)
             raise KeyError(msg)
 
@@ -384,8 +415,8 @@ class Gen(BaseGen):
         self._res_file = resource_file
         self._lr_res_file = low_res_resource_file
         self._sam_module = self.OPTIONS[self.tech]
-        self._run_attrs['sam_module'] = self._sam_module.MODULE
-        self._run_attrs['res_file'] = resource_file
+        self._run_attrs["sam_module"] = self._sam_module.MODULE
+        self._run_attrs["res_file"] = resource_file
 
         self._multi_h5_res, self._hsds = check_res_file(resource_file)
         self._gid_map = self._parse_gid_map(gid_map)
@@ -424,11 +455,12 @@ class Gen(BaseGen):
             Meta data df for sites in project points. Column names are meta
             data variables, rows are different sites. The row index
             does not indicate the site number if the project points are
-            non-sequential or do not start from 0, so a 'gid' column is added.
+            non-sequential or do not start from 0, so a `SiteDataField.GID`
+            column is added.
         """
         if self._meta is None:
             res_cls = Resource
-            kwargs = {'hsds': self._hsds}
+            kwargs = {"hsds": self._hsds}
             if self._multi_h5_res:
                 res_cls = MultiFileResource
                 kwargs = {}
@@ -438,25 +470,28 @@ class Gen(BaseGen):
                 res_gids = [self._gid_map[i] for i in res_gids]
 
             with res_cls(self.res_file, **kwargs) as res:
-                meta_len = res.shapes['meta'][0]
+                meta_len = res.shapes["meta"][0]
 
                 if np.max(res_gids) > meta_len:
-                    msg = ('ProjectPoints has a max site gid of {} which is '
-                           'out of bounds for the meta data of len {} from '
-                           'resource file: {}'
-                           .format(np.max(res_gids),
-                                   meta_len, self.res_file))
+                    msg = (
+                        "ProjectPoints has a max site gid of {} which is "
+                        "out of bounds for the meta data of len {} from "
+                        "resource file: {}".format(
+                            np.max(res_gids), meta_len, self.res_file
+                        )
+                    )
                     logger.error(msg)
                     raise ProjectPointsValueError(msg)
 
-                self._meta = res['meta', res_gids]
+                self._meta = res["meta", res_gids]
 
-            self._meta.loc[:, 'gid'] = res_gids
+            self._meta.loc[:, ResourceMetaField.GID] = res_gids
             if self.write_mapped_gids:
-                self._meta.loc[:, 'gid'] = self.project_points.sites
+                sites = self.project_points.sites
+                self._meta.loc[:, ResourceMetaField.GID] = sites
             self._meta.index = self.project_points.sites
-            self._meta.index.name = 'gid'
-            self._meta.loc[:, 'reV_tech'] = self.project_points.tech
+            self._meta.index.name = ResourceMetaField.GID
+            self._meta.loc[:, "reV_tech"] = self.project_points.tech
 
         return self._meta
 
@@ -472,7 +507,7 @@ class Gen(BaseGen):
         if self._time_index is None:
             if not self._multi_h5_res:
                 res_cls = Resource
-                kwargs = {'hsds': self._hsds}
+                kwargs = {"hsds": self._hsds}
             else:
                 res_cls = MultiFileResource
                 kwargs = {}
@@ -484,19 +519,22 @@ class Gen(BaseGen):
             step = self.project_points.sam_config_obj.time_index_step
             if downscale is not None:
                 from rex.utilities.downscale import make_time_index
+
                 year = time_index.year[0]
-                ds_freq = downscale['frequency']
+                ds_freq = downscale["frequency"]
                 time_index = make_time_index(year, ds_freq)
-                logger.info('reV solar generation running with temporal '
-                            'downscaling frequency "{}" with final '
-                            'time_index length {}'
-                            .format(ds_freq, len(time_index)))
+                logger.info(
+                    "reV solar generation running with temporal "
+                    'downscaling frequency "{}" with final '
+                    "time_index length {}".format(ds_freq, len(time_index))
+                )
             elif step is not None:
                 time_index = time_index[::step]
 
             time_index = self.handle_lifetime_index(time_index)
-            time_index = self.handle_leap_ti(time_index,
-                                             drop_leap=self._drop_leap)
+            time_index = self.handle_leap_ti(
+                time_index, drop_leap=self._drop_leap
+            )
 
             self._time_index = time_index
 
@@ -530,30 +568,32 @@ class Gen(BaseGen):
         # Only one time index may be passed, check that lifetime periods match
         n_unique_periods = len(np.unique(lifetime_periods))
         if n_unique_periods != 1:
-            msg = ('reV cannot handle multiple analysis_periods when '
-                   'modeling with `system_use_lifetime_output` set '
-                   'to 1. Found {} different analysis_periods in the SAM '
-                   'configs'.format(n_unique_periods))
+            msg = (
+                "reV cannot handle multiple analysis_periods when "
+                "modeling with `system_use_lifetime_output` set "
+                "to 1. Found {} different analysis_periods in the SAM "
+                "configs".format(n_unique_periods)
+            )
             logger.error(msg)
             raise ConfigError(msg)
 
         # Collect requested variables to check for lifetime compatibility
         array_vars = [
-            var for var, attrs in GEN_ATTRS.items()
-            if attrs['type'] == 'array'
+            var for var, attrs in GEN_ATTRS.items() if attrs["type"] == "array"
         ]
-        valid_vars = ['gen_profile', 'cf_profile', 'cf_profile_ac']
+        valid_vars = ["gen_profile", "cf_profile", "cf_profile_ac"]
         invalid_vars = set(array_vars) - set(valid_vars)
-        invalid_requests = [var for var in self.output_request
-                            if var in invalid_vars]
+        invalid_requests = [
+            var for var in self.output_request if var in invalid_vars
+        ]
 
         if invalid_requests:
             # SAM does not output full lifetime for all array variables
             msg = (
-                'reV can only handle the following output arrays '
-                'when modeling with `system_use_lifetime_output` set '
-                'to 1: {}. Try running without {}.'.format(
-                    ', '.join(valid_vars), ', '.join(invalid_requests)
+                "reV can only handle the following output arrays "
+                "when modeling with `system_use_lifetime_output` set "
+                "to 1: {}. Try running without {}.".format(
+                    ", ".join(valid_vars), ", ".join(invalid_requests)
                 )
             )
             logger.error(msg)
@@ -561,8 +601,10 @@ class Gen(BaseGen):
 
         sam_meta = self.sam_metas[next(iter(self.sam_metas))]
         analysis_period = sam_meta["analysis_period"]
-        logger.info('reV generation running with a full system '
-                    'life of {} years.'.format(analysis_period))
+        logger.info(
+            "reV generation running with a full system "
+            "life of {} years.".format(analysis_period)
+        )
 
         old_end = ti[-1]
         new_end = old_end + pd.DateOffset(years=analysis_period - 1)
@@ -573,10 +615,18 @@ class Gen(BaseGen):
         return ti
 
     @classmethod
-    def _run_single_worker(cls, points_control, tech=None, res_file=None,
-                           lr_res_file=None, output_request=None,
-                           scale_outputs=True, gid_map=None, nn_map=None,
-                           bias_correct=None):
+    def _run_single_worker(
+        cls,
+        points_control,
+        tech=None,
+        res_file=None,
+        lr_res_file=None,
+        output_request=None,
+        scale_outputs=True,
+        gid_map=None,
+        nn_map=None,
+        bias_correct=None,
+    ):
         """Run a SAM generation analysis based on the points_control iterator.
 
         Parameters
@@ -637,21 +687,24 @@ class Gen(BaseGen):
 
         # Extract the site df from the project points df.
         site_df = points_control.project_points.df
-        site_df = site_df.set_index('gid', drop=True)
+        site_df = site_df.set_index(ResourceMetaField.GID, drop=True)
 
         # run generation method for specified technology
         try:
             out = cls.OPTIONS[tech].reV_run(
-                points_control, res_file, site_df,
+                points_control,
+                res_file,
+                site_df,
                 lr_res_file=lr_res_file,
                 output_request=output_request,
-                gid_map=gid_map, nn_map=nn_map,
-                bias_correct=bias_correct
+                gid_map=gid_map,
+                nn_map=nn_map,
+                bias_correct=bias_correct,
             )
 
         except Exception as e:
             out = {}
-            logger.exception('Worker failed for PC: {}'.format(points_control))
+            logger.exception("Worker failed for PC: {}".format(points_control))
             raise e
 
         if scale_outputs:
@@ -660,9 +713,11 @@ class Gen(BaseGen):
                 for k in site_output.keys():
                     # iterate through variable names in each site's output dict
                     if k in cls.OUT_ATTRS:
+                        if out[site][k] is None:
+                            continue
                        # get dtype and scale for output variable name
-                        dtype = cls.OUT_ATTRS[k].get('dtype', 'float32')
-                        scale_factor = cls.OUT_ATTRS[k].get('scale_factor', 1)
+                        dtype = cls.OUT_ATTRS[k].get("dtype", "float32")
+                        scale_factor = cls.OUT_ATTRS[k].get("scale_factor", 1)
 
                         # apply scale factor and dtype
                         out[site][k] *= scale_factor
@@ -676,8 +731,9 @@ class Gen(BaseGen):
                             out[site][k] = out[site][k].astype(dtype)
                         else:
                             # use numpy array conversion for scalar values
-                            out[site][k] = np.array([out[site][k]],
-                                                    dtype=dtype)[0]
+                            out[site][k] = np.array(
+                                [out[site][k]], dtype=dtype
+                            )[0]
 
         return out
 
@@ -701,44 +757,56 @@ class Gen(BaseGen):
         """
 
         if isinstance(gid_map, str):
-            if gid_map.endswith('.csv'):
+            if gid_map.endswith(".csv"):
                 gid_map = pd.read_csv(gid_map).to_dict()
-                assert 'gid' in gid_map, 'Need "gid" in gid_map column'
-                assert 'gid_map' in gid_map, 'Need "gid_map" in gid_map column'
-                gid_map = {gid_map['gid'][i]: gid_map['gid_map'][i]
-                           for i in gid_map['gid'].keys()}
+                msg = f"Need {ResourceMetaField.GID} in gid_map column"
+                assert ResourceMetaField.GID in gid_map, msg
+                assert "gid_map" in gid_map, 'Need "gid_map" in gid_map column'
+                gid_map = {
+                    gid_map[ResourceMetaField.GID][i]: gid_map["gid_map"][i]
+                    for i in gid_map[ResourceMetaField.GID].keys()
+                }
 
-            elif gid_map.endswith('.json'):
-                with open(gid_map, 'r') as f:
+            elif gid_map.endswith(".json"):
+                with open(gid_map) as f:
                     gid_map = json.load(f)
 
         if isinstance(gid_map, dict):
             if not self._multi_h5_res:
                 res_cls = Resource
-                kwargs = {'hsds': self._hsds}
+                kwargs = {"hsds": self._hsds}
             else:
                 res_cls = MultiFileResource
                 kwargs = {}
 
             with res_cls(self.res_file, **kwargs) as res:
                 for gen_gid, res_gid in gid_map.items():
-                    msg1 = ('gid_map values must all be int but received '
-                            '{}: {}'.format(gen_gid, res_gid))
-                    msg2 = ('Could not find the gen_gid to res_gid mapping '
-                            '{}: {} in the resource meta data.'
-                            .format(gen_gid, res_gid))
+                    msg1 = (
+                        "gid_map values must all be int but received "
+                        "{}: {}".format(gen_gid, res_gid)
+                    )
+                    msg2 = (
+                        "Could not find the gen_gid to res_gid mapping "
+                        "{}: {} in the resource meta data.".format(
+                            gen_gid, res_gid
+                        )
+                    )
                     assert isinstance(gen_gid, int), msg1
                     assert isinstance(res_gid, int), msg1
                     assert res_gid in res.meta.index.values, msg2
 
             for gen_gid in self.project_points.sites:
-                msg3 = ('Could not find the project points gid {} in the '
-                        'gen_gid input of the gid_map.'.format(gen_gid))
+                msg3 = (
+                    "Could not find the project points gid {} in the "
+                    "gen_gid input of the gid_map.".format(gen_gid)
+                )
                 assert gen_gid in gid_map, msg3
 
         elif gid_map is not None:
-            msg = ('Could not parse gid_map, must be None, dict, or path to '
-                   'csv or json, but received: {}'.format(gid_map))
+            msg = (
+                "Could not parse gid_map, must be None, dict, or path to "
+                "csv or json, but received: {}".format(gid_map)
+            )
             logger.error(msg)
             raise InputError(msg)
 
@@ -758,24 +826,32 @@ class Gen(BaseGen):
         """
         nn_map = None
         if self.lr_res_file is not None:
-
             handler_class = Resource
-            if '*' in self.res_file or '*' in self.lr_res_file:
+            if "*" in self.res_file or "*" in self.lr_res_file:
                 handler_class = MultiFileResource
 
-            with handler_class(self.res_file) as hr_res:
-                with handler_class(self.lr_res_file) as lr_res:
-                    logger.info('Making nearest neighbor map for multi '
-                                'resolution resource data...')
-                    nn_d, nn_map = MultiResolutionResource.make_nn_map(hr_res,
-                                                                       lr_res)
-                    logger.info('Done making nearest neighbor map for multi '
-                                'resolution resource data!')
+            with handler_class(self.res_file) as hr_res, handler_class(
+                self.lr_res_file
+            ) as lr_res:
+                logger.info(
+                    "Making nearest neighbor map for multi "
+                    "resolution resource data..."
+                )
+                nn_d, nn_map = MultiResolutionResource.make_nn_map(
+                    hr_res, lr_res
+                )
+                logger.info(
+                    "Done making nearest neighbor map for multi "
+                    "resolution resource data!"
+                )
 
-            logger.info('Made nearest neighbor mapping between nominal-'
-                        'resolution and low-resolution resource files. '
-                        'Min / mean / max dist: {:.3f} / {:.3f} / {:.3f}'
-                        .format(nn_d.min(), nn_d.mean(), nn_d.max()))
+            logger.info(
+                "Made nearest neighbor mapping between nominal-"
+                "resolution and low-resolution resource files. "
+                "Min / mean / max dist: {:.3f} / {:.3f} / {:.3f}".format(
+                    nn_d.min(), nn_d.mean(), nn_d.max()
+                )
+            )
 
         return nn_map
 
@@ -829,23 +905,34 @@ class Gen(BaseGen):
         if isinstance(bias_correct, type(None)):
             return bias_correct
 
-        elif isinstance(bias_correct, str):
-            bias_correct = pd.read_csv(bias_correct)
+        if isinstance(bias_correct, str):
+            bias_correct = pd.read_csv(bias_correct).rename(
+                SupplyCurveField.map_to(ResourceMetaField), axis=1
+            )
 
-        msg = ('Bias correction data must be a filepath to csv or a dataframe '
-               'but received: {}'.format(type(bias_correct)))
+        msg = (
+            "Bias correction data must be a filepath to csv or a dataframe "
+            "but received: {}".format(type(bias_correct))
+        )
         assert isinstance(bias_correct, pd.DataFrame), msg
 
-        msg = ('Bias correction table must have "gid" column but only found: '
-               '{}'.format(list(bias_correct.columns)))
-        assert 'gid' in bias_correct or bias_correct.index.name == 'gid', msg
+        msg = (
+            "Bias correction table must have {!r} column but only found: "
+            "{}".format(ResourceMetaField.GID, list(bias_correct.columns))
+        )
+        assert (
+            ResourceMetaField.GID in bias_correct
+            or bias_correct.index.name == ResourceMetaField.GID
+        ), msg
 
-        if bias_correct.index.name != 'gid':
-            bias_correct = bias_correct.set_index('gid')
+        if bias_correct.index.name != ResourceMetaField.GID:
+            bias_correct = bias_correct.set_index(ResourceMetaField.GID)
 
-        msg = ('Bias correction table must have "method" column but only '
-               'found: {}'.format(list(bias_correct.columns)))
-        assert 'method' in bias_correct, msg
+        msg = (
+            'Bias correction table must have "method" column but only '
+            "found: {}".format(list(bias_correct.columns))
+        )
+        assert "method" in bias_correct, msg
 
         return bias_correct
 
@@ -863,16 +950,26 @@ class Gen(BaseGen):
             Output variables requested from SAM.
         """
 
-        output_request = self._output_request_type_check(req)
+        output_request = super()._parse_output_request(req)
 
         # ensure that cf_mean is requested from output
-        if 'cf_mean' not in output_request:
-            output_request.append('cf_mean')
+        if "cf_mean" not in output_request:
+            output_request.append("cf_mean")
+
+        if _is_solar_run_with_ac_outputs(self.tech):
+            if "dc_ac_ratio" not in output_request:
+                output_request.append("dc_ac_ratio")
+            for dset in ["cf_mean", "cf_profile"]:
+                ac_dset = f"{dset}_ac"
+                if dset in output_request and ac_dset not in output_request:
+                    output_request.append(ac_dset)
 
         for request in output_request:
             if request not in self.OUT_ATTRS:
-                msg = ('User output request "{}" not recognized. '
-                       'Will attempt to extract from PySAM.'.format(request))
+                msg = (
+                    'User output request "{}" not recognized. '
+                    "Will attempt to extract from PySAM.".format(request)
+                )
                 logger.debug(msg)
 
         return list(set(output_request))
@@ -896,20 +993,19 @@ class Gen(BaseGen):
         """
 
         gids = pc.project_points.gids
-        gid_map = kwargs.get('gid_map', None)
-        bias_correct = kwargs.get('bias_correct', None)
+        gid_map = kwargs.get("gid_map", None)
+        bias_correct = kwargs.get("bias_correct", None)
 
         if bias_correct is not None:
             if gid_map is not None:
                 gids = [gid_map[gid] for gid in gids]
 
             mask = bias_correct.index.isin(gids)
-            kwargs['bias_correct'] = bias_correct[mask]
+            kwargs["bias_correct"] = bias_correct[mask]
 
         return kwargs
 
-    def run(self, out_fpath=None, max_workers=1, timeout=1800,
-            pool_size=None):
+    def run(self, out_fpath=None, max_workers=1, timeout=1800, pool_size=None):
         """Execute a parallel reV generation run with smart data flushing.
 
         Parameters
@@ -947,45 +1043,75 @@ class Gen(BaseGen):
         if pool_size is None:
             pool_size = os.cpu_count() * 2
 
-        kwargs = {'tech': self.tech,
-                  'res_file': self.res_file,
-                  'lr_res_file': self.lr_res_file,
-                  'output_request': self.output_request,
-                  'scale_outputs': self.scale_outputs,
-                  'gid_map': self._gid_map,
-                  'nn_map': self._nn_map,
-                  'bias_correct': self._bc}
-
-        logger.info('Running reV generation for: {}'
-                    .format(self.points_control))
-        logger.debug('The following project points were specified: "{}"'
-                     .format(self.project_points))
-        logger.debug('The following SAM configs are available to this run:\n{}'
-                     .format(pprint.pformat(self.sam_configs, indent=4)))
-        logger.debug('The SAM output variables have been requested:\n{}'
-                     .format(self.output_request))
+        kwargs = {
+            "tech": self.tech,
+            "res_file": self.res_file,
+            "lr_res_file": self.lr_res_file,
+            "output_request": self.output_request,
+            "scale_outputs": self.scale_outputs,
+            "gid_map": self._gid_map,
+            "nn_map": self._nn_map,
+            "bias_correct": self._bc,
+        }
+
+        logger.info(
+            "Running reV generation for: {}".format(self.points_control)
+        )
+        logger.debug(
+            'The following project points were specified: "{}"'.format(
+                self.project_points
+            )
+        )
+        logger.debug(
+            "The following SAM configs are available to this run:\n{}".format(
+                pprint.pformat(self.sam_configs, indent=4)
+            )
+        )
+        logger.debug(
+            "The SAM output variables have been requested:\n{}".format(
+                self.output_request
+            )
+        )
 
         # use serial or parallel execution control based on max_workers
         try:
             if max_workers == 1:
-                logger.debug('Running serial generation for: {}'
-                             .format(self.points_control))
+                logger.debug(
+                    "Running serial generation for: {}".format(
+                        self.points_control
+                    )
+                )
                 for i, pc_sub in enumerate(self.points_control):
                     self.out = self._run_single_worker(pc_sub, **kwargs)
-                    logger.info('Finished reV gen serial compute for: {} '
-                                '(iteration {} out of {})'
-                                .format(pc_sub, i + 1,
-                                        len(self.points_control)))
+                    logger.info(
+                        "Finished reV gen serial compute for: {} "
+                        "(iteration {} out of {})".format(
+                            pc_sub, i + 1, len(self.points_control)
+                        )
+                    )
                 self.flush()
             else:
-                logger.debug('Running parallel generation for: {}'
-                             .format(self.points_control))
-                self._parallel_run(max_workers=max_workers,
-                                   pool_size=pool_size, timeout=timeout,
-                                   **kwargs)
+                logger.debug(
+                    "Running parallel generation for: {}".format(
+                        self.points_control
+                    )
+                )
+                self._parallel_run(
+                    max_workers=max_workers,
+                    pool_size=pool_size,
+                    timeout=timeout,
+                    **kwargs,
+                )
 
         except Exception as e:
-            logger.exception('reV generation failed!')
+            logger.exception("reV generation failed!")
             raise e
 
         return self._out_fpath
+
+
+def _is_solar_run_with_ac_outputs(tech):
+    """True if tech is pvwattsv8+"""
+    if "pvwatts" not in tech.casefold():
+        return False
+    return tech.casefold() not in {f"pvwattsv{i}" for i in range(8)}
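
The diff ends with the new module-level helper _is_solar_run_with_ac_outputs, which _parse_output_request uses to auto-append "dc_ac_ratio" and the "_ac" variants of any requested "cf_mean"/"cf_profile" for pvwattsv8 and newer. The standalone check below re-runs that gate outside the package: the helper body is copied verbatim from the hunk above, while the list of technology strings tested is purely illustrative.

# Standalone check of the pvwattsv8+ gate added at the end of the diff above.
def _is_solar_run_with_ac_outputs(tech):
    """True if tech is pvwattsv8+"""
    if "pvwatts" not in tech.casefold():
        return False
    return tech.casefold() not in {f"pvwattsv{i}" for i in range(8)}


for tech in ("windpower", "pvwattsv5", "pvwattsv7", "pvwattsv8", "PVWattsv9"):
    print(tech, _is_solar_run_with_ac_outputs(tech))
# -> False, False, False, True, True: only pvwattsv8 and later trigger the
#    automatic AC output requests in Gen._parse_output_request.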