NREL-reV 0.8.7__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (43)
  1. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/METADATA +13 -10
  2. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/RECORD +43 -43
  3. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/WHEEL +1 -1
  4. reV/SAM/SAM.py +217 -133
  5. reV/SAM/econ.py +18 -14
  6. reV/SAM/generation.py +611 -422
  7. reV/SAM/windbos.py +93 -79
  8. reV/bespoke/bespoke.py +681 -377
  9. reV/bespoke/cli_bespoke.py +2 -0
  10. reV/bespoke/place_turbines.py +187 -43
  11. reV/config/output_request.py +2 -1
  12. reV/config/project_points.py +218 -140
  13. reV/econ/econ.py +166 -114
  14. reV/econ/economies_of_scale.py +91 -45
  15. reV/generation/base.py +331 -184
  16. reV/generation/generation.py +326 -200
  17. reV/generation/output_attributes/lcoe_fcr_inputs.json +38 -3
  18. reV/handlers/__init__.py +0 -1
  19. reV/handlers/exclusions.py +16 -15
  20. reV/handlers/multi_year.py +57 -26
  21. reV/handlers/outputs.py +6 -5
  22. reV/handlers/transmission.py +44 -27
  23. reV/hybrids/hybrid_methods.py +30 -30
  24. reV/hybrids/hybrids.py +305 -189
  25. reV/nrwal/nrwal.py +262 -168
  26. reV/qa_qc/cli_qa_qc.py +14 -10
  27. reV/qa_qc/qa_qc.py +217 -119
  28. reV/qa_qc/summary.py +228 -146
  29. reV/rep_profiles/rep_profiles.py +349 -230
  30. reV/supply_curve/aggregation.py +349 -188
  31. reV/supply_curve/competitive_wind_farms.py +90 -48
  32. reV/supply_curve/exclusions.py +138 -85
  33. reV/supply_curve/extent.py +75 -50
  34. reV/supply_curve/points.py +735 -390
  35. reV/supply_curve/sc_aggregation.py +357 -248
  36. reV/supply_curve/supply_curve.py +604 -347
  37. reV/supply_curve/tech_mapping.py +144 -82
  38. reV/utilities/__init__.py +274 -16
  39. reV/utilities/pytest_utils.py +8 -4
  40. reV/version.py +1 -1
  41. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/LICENSE +0 -0
  42. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/entry_points.txt +0 -0
  43. {NREL_reV-0.8.7.dist-info → NREL_reV-0.9.0.dist-info}/top_level.txt +0 -0
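
The diff below covers reV/supply_curve/aggregation.py (file 30 above). Most of the churn is a mechanical reformat to black-style code (double-quoted strings, one argument per line, trailing commas); the substantive change is the replacement of hard-coded meta column names such as 'gid', 'res_gids' and 'sc_point_gid' with the new ResourceMetaField and SupplyCurveField enums imported from reV.utilities (file 38 above). A minimal sketch of that pattern, assuming string-valued enum members with illustrative values (the real definitions live in reV/utilities/__init__.py and are not part of this diff):

from enum import Enum

import pandas as pd


class FieldEnum(str, Enum):
    """Subclassing str lets members stand in for plain column names."""

    def __str__(self):
        return self.value


class ResourceMetaField(FieldEnum):
    GID = "gid"  # assumed member value, for illustration only


class SupplyCurveField(FieldEnum):
    RES_GIDS = "res_gids"  # assumed
    GEN_GIDS = "gen_gids"  # assumed


meta = pd.DataFrame({"gid": [10, 11, 12]})

# str-subclass members hash and compare like their values, so membership
# tests and renames against plain-string columns keep working:
assert ResourceMetaField.GID in meta
meta = meta.rename(columns={ResourceMetaField.GID: SupplyCurveField.RES_GIDS})
assert SupplyCurveField.RES_GIDS in meta

Under that assumption, expressions like ResourceMetaField.GID in gen_index in _parse_gen_index below behave exactly as the old string literals did, while keeping the canonical names in one place.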
reV/supply_curve/aggregation.py
@@ -1,27 +1,29 @@
 # -*- coding: utf-8 -*-
-"""
-reV aggregation framework.
-"""
+"""reV aggregation framework."""
+
+import logging
+import os
 from abc import ABC, abstractmethod
+
 import h5py
-import logging
 import numpy as np
-import os
 import pandas as pd
+from rex.resource import Resource
+from rex.utilities.execution import SpawnProcessPool
+from rex.utilities.loggers import log_mem
 
-from reV.handlers.outputs import Outputs
 from reV.handlers.exclusions import ExclusionLayers
+from reV.handlers.outputs import Outputs
 from reV.supply_curve.exclusions import ExclusionMaskFromDict
 from reV.supply_curve.extent import SupplyCurveExtent
-from reV.supply_curve.tech_mapping import TechMapping
 from reV.supply_curve.points import AggregationSupplyCurvePoint
-from reV.utilities.exceptions import (EmptySupplyCurvePointError,
-                                      FileInputError, SupplyCurveInputError)
-from reV.utilities import log_versions
-
-from rex.resource import Resource
-from rex.utilities.execution import SpawnProcessPool
-from rex.utilities.loggers import log_mem
+from reV.supply_curve.tech_mapping import TechMapping
+from reV.utilities import ResourceMetaField, SupplyCurveField, log_versions
+from reV.utilities.exceptions import (
+    EmptySupplyCurvePointError,
+    FileInputError,
+    SupplyCurveInputError,
+)
 
 
 logger = logging.getLogger(__name__)
 
@@ -29,8 +31,13 @@ logger = logging.getLogger(__name__)
 class AbstractAggFileHandler(ABC):
     """Simple framework to handle aggregation file context managers."""
 
-    def __init__(self, excl_fpath, excl_dict=None, area_filter_kernel='queen',
-                 min_area=None):
+    def __init__(
+        self,
+        excl_fpath,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+    ):
         """
         Parameters
         ----------
@@ -51,9 +58,12 @@ class AbstractAggFileHandler(ABC):
             by default None
         """
         self._excl_fpath = excl_fpath
-        self._excl = ExclusionMaskFromDict(excl_fpath, layers_dict=excl_dict,
-                                           min_area=min_area,
-                                           kernel=area_filter_kernel)
+        self._excl = ExclusionMaskFromDict(
+            excl_fpath,
+            layers_dict=excl_dict,
+            min_area=min_area,
+            kernel=area_filter_kernel,
+        )
 
     def __enter__(self):
         return self
@@ -95,9 +105,15 @@ class AggFileHandler(AbstractAggFileHandler):
 
     DEFAULT_H5_HANDLER = Resource
 
-    def __init__(self, excl_fpath, h5_fpath, excl_dict=None,
-                 area_filter_kernel='queen', min_area=None,
-                 h5_handler=None):
+    def __init__(
+        self,
+        excl_fpath,
+        h5_fpath,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        h5_handler=None,
+    ):
         """
         Parameters
         ----------
@@ -121,9 +137,12 @@ class AggFileHandler(AbstractAggFileHandler):
            Optional special handler similar to the rex.Resource handler which
            is default.
        """
-        super().__init__(excl_fpath, excl_dict=excl_dict,
-                         area_filter_kernel=area_filter_kernel,
-                         min_area=min_area)
+        super().__init__(
+            excl_fpath,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+        )
 
         if h5_handler is None:
             self._h5 = Resource(h5_fpath)
@@ -152,10 +171,19 @@ class BaseAggregation(ABC):
     """Abstract supply curve points aggregation framework based on only an
     exclusion file and techmap."""
 
-    def __init__(self, excl_fpath, tm_dset, excl_dict=None,
-                 area_filter_kernel='queen', min_area=None,
-                 resolution=64, excl_area=None, res_fpath=None, gids=None,
-                 pre_extract_inclusions=False):
+    def __init__(
+        self,
+        excl_fpath,
+        tm_dset,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        excl_area=None,
+        res_fpath=None,
+        gids=None,
+        pre_extract_inclusions=False,
+    ):
         """
         Parameters
         ----------
@@ -208,12 +236,15 @@ class BaseAggregation(ABC):
         self._validate_tech_mapping()
 
         if pre_extract_inclusions:
-            self._inclusion_mask = \
+            self._inclusion_mask = (
                 ExclusionMaskFromDict.extract_inclusion_mask(
-                    excl_fpath, tm_dset,
+                    excl_fpath,
+                    tm_dset,
                     excl_dict=excl_dict,
                     area_filter_kernel=area_filter_kernel,
-                    min_area=min_area)
+                    min_area=min_area,
+                )
+            )
         else:
             self._inclusion_mask = None
 
@@ -228,20 +259,28 @@ class BaseAggregation(ABC):
         if tm_in_excl:
             logger.info('Found techmap "{}".'.format(self._tm_dset))
         elif not tm_in_excl and not excl_fp_is_str:
-            msg = ('Could not find techmap dataset "{}" and cannot run '
-                   'techmap with arbitrary multiple exclusion filepaths '
-                   'to write to: {}'.format(self._tm_dset, self._excl_fpath))
+            msg = (
+                'Could not find techmap dataset "{}" and cannot run '
+                "techmap with arbitrary multiple exclusion filepaths "
+                "to write to: {}".format(self._tm_dset, self._excl_fpath)
+            )
             logger.error(msg)
             raise RuntimeError(msg)
         else:
-            logger.info('Could not find techmap "{}". Running techmap module.'
-                        .format(self._tm_dset))
+            logger.info(
+                'Could not find techmap "{}". Running techmap module.'.format(
+                    self._tm_dset
+                )
+            )
             try:
-                TechMapping.run(self._excl_fpath, self._res_fpath,
-                                dset=self._tm_dset)
+                TechMapping.run(
+                    self._excl_fpath, self._res_fpath, dset=self._tm_dset
+                )
             except Exception as e:
-                msg = ('TechMapping process failed. Received the '
-                       'following error:\n{}'.format(e))
+                msg = (
+                    "TechMapping process failed. Received the "
+                    "following error:\n{}".format(e)
+                )
                 logger.exception(msg)
                 raise RuntimeError(msg) from e
 
@@ -255,8 +294,9 @@ class BaseAggregation(ABC):
         ndarray
         """
         if self._gids is None:
-            with SupplyCurveExtent(self._excl_fpath,
-                                   resolution=self._resolution) as sc:
+            with SupplyCurveExtent(
+                self._excl_fpath, resolution=self._resolution
+            ) as sc:
                 self._gids = sc.valid_sc_points(self._tm_dset)
         elif np.issubdtype(type(self._gids), np.number):
             self._gids = np.array([self._gids])
@@ -274,8 +314,9 @@ class BaseAggregation(ABC):
         tuple
         """
         if self._shape is None:
-            with SupplyCurveExtent(self._excl_fpath,
-                                   resolution=self._resolution) as sc:
+            with SupplyCurveExtent(
+                self._excl_fpath, resolution=self._resolution
+            ) as sc:
                 self._shape = sc.exclusions.shape
 
         return self._shape
@@ -301,14 +342,18 @@ class BaseAggregation(ABC):
            Area of an exclusion pixel in km2
        """
        if excl_area is None:
-            logger.debug('Setting the exclusion area from the area of a pixel '
-                         'in {}'.format(excl_fpath))
+            logger.debug(
+                "Setting the exclusion area from the area of a pixel "
+                "in {}".format(excl_fpath)
+            )
            with ExclusionLayers(excl_fpath) as excl:
                excl_area = excl.pixel_area
 
        if excl_area is None:
-            e = ('No exclusion pixel area was input and could not parse '
-                 'area from the exclusion file attributes!')
+            e = (
+                "No exclusion pixel area was input and could not parse "
+                "area from the exclusion file attributes!"
+            )
            logger.error(e)
            raise SupplyCurveInputError(e)
 
@@ -337,14 +382,17 @@ class BaseAggregation(ABC):
        elif isinstance(inclusion_mask, np.ndarray):
            assert inclusion_mask.shape == excl_shape
        elif inclusion_mask is not None:
-            msg = ('Expected inclusion_mask to be dict or array but received '
-                   '{}'.format(type(inclusion_mask)))
+            msg = (
+                "Expected inclusion_mask to be dict or array but received "
+                "{}".format(type(inclusion_mask))
+            )
            logger.error(msg)
            raise SupplyCurveInputError(msg)
 
     @staticmethod
-    def _get_gid_inclusion_mask(inclusion_mask, gid, slice_lookup,
-                                resolution=64):
+    def _get_gid_inclusion_mask(
+        inclusion_mask, gid, slice_lookup, resolution=64
+    ):
         """
         Get inclusion mask for desired gid
 
@@ -381,8 +429,10 @@ class BaseAggregation(ABC):
            row_slice, col_slice = slice_lookup[gid]
            gid_inclusions = inclusion_mask[row_slice, col_slice]
        elif inclusion_mask is not None:
-            msg = ('Expected inclusion_mask to be dict or array but received '
-                   '{}'.format(type(inclusion_mask)))
+            msg = (
+                "Expected inclusion_mask to be dict or array but received "
+                "{}".format(type(inclusion_mask))
+            )
            logger.error(msg)
            raise SupplyCurveInputError(msg)
 
@@ -407,26 +457,34 @@ class BaseAggregation(ABC):
            generation run.
        """
 
-        if gen_fpath.endswith('.h5'):
+        if gen_fpath.endswith(".h5"):
            with Resource(gen_fpath) as f:
                gen_index = f.meta
-        elif gen_fpath.endswith('.csv'):
+        elif gen_fpath.endswith(".csv"):
            gen_index = pd.read_csv(gen_fpath)
        else:
-            msg = ('Could not recognize gen_fpath input, needs to be reV gen '
-                   'output h5 or project points csv but received: {}'
-                   .format(gen_fpath))
+            msg = (
+                "Could not recognize gen_fpath input, needs to be reV gen "
+                "output h5 or project points csv but received: {}".format(
+                    gen_fpath
+                )
+            )
            logger.error(msg)
            raise FileInputError(msg)
 
-        if 'gid' in gen_index:
-            gen_index = gen_index.rename(columns={'gid': 'res_gids'})
-            gen_index['gen_gids'] = gen_index.index
-            gen_index = gen_index[['res_gids', 'gen_gids']]
-            gen_index = gen_index.set_index(keys='res_gids')
-            gen_index = \
-                gen_index.reindex(range(int(gen_index.index.max() + 1)))
-            gen_index = gen_index['gen_gids'].values
+        if ResourceMetaField.GID in gen_index:
+            gen_index = gen_index.rename(
+                columns={ResourceMetaField.GID: SupplyCurveField.RES_GIDS}
+            )
+            gen_index[SupplyCurveField.GEN_GIDS] = gen_index.index
+            gen_index = gen_index[
+                [SupplyCurveField.RES_GIDS, SupplyCurveField.GEN_GIDS]
+            ]
+            gen_index = gen_index.set_index(keys=SupplyCurveField.RES_GIDS)
+            gen_index = gen_index.reindex(
+                range(int(gen_index.index.max() + 1))
+            )
+            gen_index = gen_index[SupplyCurveField.GEN_GIDS].values
            gen_index[np.isnan(gen_index)] = -1
            gen_index = gen_index.astype(np.int32)
        else:
@@ -439,10 +497,19 @@ class Aggregation(BaseAggregation):
     """Concrete but generalized aggregation framework to aggregate ANY reV h5
     file to a supply curve grid (based on an aggregated exclusion grid)."""
 
-    def __init__(self, excl_fpath, tm_dset, *agg_dset,
-                 excl_dict=None, area_filter_kernel='queen', min_area=None,
-                 resolution=64, excl_area=None, gids=None,
-                 pre_extract_inclusions=False):
+    def __init__(
+        self,
+        excl_fpath,
+        tm_dset,
+        *agg_dset,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        excl_area=None,
+        gids=None,
+        pre_extract_inclusions=False,
+    ):
         """
         Parameters
         ----------
@@ -486,18 +553,24 @@ class Aggregation(BaseAggregation):
            the inclusion mask on the fly with parallel workers.
        """
        log_versions(logger)
-        logger.info('Initializing Aggregation...')
-        logger.debug('Exclusion filepath: {}'.format(excl_fpath))
-        logger.debug('Exclusion dict: {}'.format(excl_dict))
-
-        super().__init__(excl_fpath, tm_dset, excl_dict=excl_dict,
-                         area_filter_kernel=area_filter_kernel,
-                         min_area=min_area, resolution=resolution,
-                         excl_area=excl_area, gids=gids,
-                         pre_extract_inclusions=pre_extract_inclusions)
+        logger.info("Initializing Aggregation...")
+        logger.debug("Exclusion filepath: {}".format(excl_fpath))
+        logger.debug("Exclusion dict: {}".format(excl_dict))
+
+        super().__init__(
+            excl_fpath,
+            tm_dset,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+            resolution=resolution,
+            excl_area=excl_area,
+            gids=gids,
+            pre_extract_inclusions=pre_extract_inclusions,
+        )
 
        if isinstance(agg_dset, str):
-            agg_dset = (agg_dset, )
+            agg_dset = (agg_dset,)
 
        self._agg_dsets = agg_dset
 
@@ -505,33 +578,51 @@ class Aggregation(BaseAggregation):
        """Do a preflight check on input files"""
 
        if not os.path.exists(self._excl_fpath):
-            raise FileNotFoundError('Could not find required exclusions file: '
-                                    '{}'.format(self._excl_fpath))
+            raise FileNotFoundError(
+                "Could not find required exclusions file: " "{}".format(
+                    self._excl_fpath
+                )
+            )
 
        if not os.path.exists(h5_fpath):
-            raise FileNotFoundError('Could not find required h5 file: '
-                                    '{}'.format(h5_fpath))
+            raise FileNotFoundError(
+                "Could not find required h5 file: " "{}".format(h5_fpath)
+            )
 
-        with h5py.File(self._excl_fpath, 'r') as f:
+        with h5py.File(self._excl_fpath, "r") as f:
            if self._tm_dset not in f:
-                raise FileInputError('Could not find techmap dataset "{}" '
-                                     'in exclusions file: {}'
-                                     .format(self._tm_dset,
-                                             self._excl_fpath))
+                raise FileInputError(
+                    'Could not find techmap dataset "{}" '
+                    "in exclusions file: {}".format(
+                        self._tm_dset, self._excl_fpath
+                    )
+                )
 
        with Resource(h5_fpath) as f:
            for dset in self._agg_dsets:
                if dset not in f:
-                    raise FileInputError('Could not find provided dataset "{}"'
-                                         ' in h5 file: {}'
-                                         .format(dset, h5_fpath))
+                    raise FileInputError(
+                        'Could not find provided dataset "{}"'
+                        " in h5 file: {}".format(dset, h5_fpath)
+                    )
 
     @classmethod
-    def run_serial(cls, excl_fpath, h5_fpath, tm_dset, *agg_dset,
-                   agg_method='mean', excl_dict=None, inclusion_mask=None,
-                   area_filter_kernel='queen', min_area=None,
-                   resolution=64, excl_area=0.0081, gids=None,
-                   gen_index=None):
+    def run_serial(
+        cls,
+        excl_fpath,
+        h5_fpath,
+        tm_dset,
+        *agg_dset,
+        agg_method="mean",
+        excl_dict=None,
+        inclusion_mask=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        excl_area=0.0081,
+        gids=None,
+        gen_index=None,
+    ):
         """
         Standalone method to aggregate - can be parallelized.
 
@@ -602,17 +693,22 @@ class Aggregation(BaseAggregation):
        cls._check_inclusion_mask(inclusion_mask, gids, exclusion_shape)
 
        # pre-extract handlers so they are not repeatedly initialized
-        file_kwargs = {'excl_dict': excl_dict,
-                       'area_filter_kernel': area_filter_kernel,
-                       'min_area': min_area}
-        dsets = agg_dset + ('meta', )
+        file_kwargs = {
+            "excl_dict": excl_dict,
+            "area_filter_kernel": area_filter_kernel,
+            "min_area": min_area,
+        }
+        dsets = (
+            *agg_dset,
+            "meta",
+        )
        agg_out = {ds: [] for ds in dsets}
        with AggFileHandler(excl_fpath, h5_fpath, **file_kwargs) as fh:
            n_finished = 0
            for gid in gids:
                gid_inclusions = cls._get_gid_inclusion_mask(
-                    inclusion_mask, gid, slice_lookup,
-                    resolution=resolution)
+                    inclusion_mask, gid, slice_lookup, resolution=resolution
+                )
                try:
                    gid_out = AggregationSupplyCurvePoint.run(
                        gid,
@@ -627,28 +723,40 @@ class Aggregation(BaseAggregation):
                        excl_area=excl_area,
                        exclusion_shape=exclusion_shape,
                        close=False,
-                        gen_index=gen_index)
+                        gen_index=gen_index,
+                    )
 
                except EmptySupplyCurvePointError:
-                    logger.debug('SC gid {} is fully excluded or does not '
-                                 'have any valid source data!'.format(gid))
+                    logger.debug(
+                        "SC gid {} is fully excluded or does not "
+                        "have any valid source data!".format(gid)
+                    )
                except Exception as e:
-                    msg = 'SC gid {} failed!'.format(gid)
+                    msg = "SC gid {} failed!".format(gid)
                    logger.exception(msg)
                    raise RuntimeError(msg) from e
                else:
                    n_finished += 1
-                    logger.debug('Serial aggregation: '
-                                 '{} out of {} points complete'
-                                 .format(n_finished, len(gids)))
+                    logger.debug(
+                        "Serial aggregation: "
+                        "{} out of {} points complete".format(
+                            n_finished, len(gids)
+                        )
+                    )
                    log_mem(logger)
                    for k, v in gid_out.items():
                        agg_out[k].append(v)
 
        return agg_out
 
-    def run_parallel(self, h5_fpath, agg_method='mean', excl_area=None,
-                     max_workers=None, sites_per_worker=100):
+    def run_parallel(
+        self,
+        h5_fpath,
+        agg_method="mean",
+        excl_area=None,
+        max_workers=None,
+        sites_per_worker=100,
+    ):
         """
         Aggregate in parallel
 
@@ -681,22 +789,29 @@ class Aggregation(BaseAggregation):
        chunks = np.array_split(self.gids, chunks)
 
        if self._inclusion_mask is not None:
-            with SupplyCurveExtent(self._excl_fpath,
-                                   resolution=self._resolution) as sc:
+            with SupplyCurveExtent(
+                self._excl_fpath, resolution=self._resolution
+            ) as sc:
                assert sc.exclusions.shape == self._inclusion_mask.shape
                slice_lookup = sc.get_slice_lookup(self.gids)
 
-        logger.info('Running supply curve point aggregation for '
-                    'points {} through {} at a resolution of {} '
-                    'on {} cores in {} chunks.'
-                    .format(self.gids[0], self.gids[-1], self._resolution,
-                            max_workers, len(chunks)))
+        logger.info(
+            "Running supply curve point aggregation for "
+            "points {} through {} at a resolution of {} "
+            "on {} cores in {} chunks.".format(
+                self.gids[0],
+                self.gids[-1],
+                self._resolution,
+                max_workers,
+                len(chunks),
+            )
+        )
 
        n_finished = 0
        futures = []
-        dsets = self._agg_dsets + ('meta', )
+        dsets = self._agg_dsets + ("meta",)
        agg_out = {ds: [] for ds in dsets}
-        loggers = [__name__, 'reV.supply_curve.points', 'reV']
+        loggers = [__name__, "reV.supply_curve.points", "reV"]
        with SpawnProcessPool(max_workers=max_workers, loggers=loggers) as exe:
            # iterate through split executions, submitting each to worker
            for gid_set in chunks:
@@ -709,36 +824,45 @@ class Aggregation(BaseAggregation):
                        chunk_incl_masks[gid] = self._inclusion_mask[rs, cs]
 
                # submit executions and append to futures list
-                futures.append(exe.submit(
-                    self.run_serial,
-                    self._excl_fpath,
-                    h5_fpath,
-                    self._tm_dset,
-                    *self._agg_dsets,
-                    agg_method=agg_method,
-                    excl_dict=self._excl_dict,
-                    inclusion_mask=chunk_incl_masks,
-                    area_filter_kernel=self._area_filter_kernel,
-                    min_area=self._min_area,
-                    resolution=self._resolution,
-                    excl_area=excl_area,
-                    gids=gid_set,
-                    gen_index=gen_index))
+                futures.append(
+                    exe.submit(
+                        self.run_serial,
+                        self._excl_fpath,
+                        h5_fpath,
+                        self._tm_dset,
+                        *self._agg_dsets,
+                        agg_method=agg_method,
+                        excl_dict=self._excl_dict,
+                        inclusion_mask=chunk_incl_masks,
+                        area_filter_kernel=self._area_filter_kernel,
+                        min_area=self._min_area,
+                        resolution=self._resolution,
+                        excl_area=excl_area,
+                        gids=gid_set,
+                        gen_index=gen_index,
+                    )
+                )
 
            # gather results
            for future in futures:
                n_finished += 1
-                logger.info('Parallel aggregation futures collected: '
-                            '{} out of {}'
-                            .format(n_finished, len(chunks)))
+                logger.info(
+                    "Parallel aggregation futures collected: "
+                    "{} out of {}".format(n_finished, len(chunks))
+                )
                for k, v in future.result().items():
                    if v:
                        agg_out[k].extend(v)
 
        return agg_out
 
-    def aggregate(self, h5_fpath, agg_method='mean', max_workers=None,
-                  sites_per_worker=100):
+    def aggregate(
+        self,
+        h5_fpath,
+        agg_method="mean",
+        max_workers=None,
+        sites_per_worker=100,
+    ):
         """
         Aggregate with given agg_method
 
@@ -766,38 +890,44 @@ class Aggregation(BaseAggregation):
        if max_workers == 1:
            self._check_files(h5_fpath)
            gen_index = self._parse_gen_index(h5_fpath)
-            agg = self.run_serial(self._excl_fpath,
-                                  h5_fpath,
-                                  self._tm_dset,
-                                  *self._agg_dsets,
-                                  agg_method=agg_method,
-                                  excl_dict=self._excl_dict,
-                                  gids=self.gids,
-                                  inclusion_mask=self._inclusion_mask,
-                                  area_filter_kernel=self._area_filter_kernel,
-                                  min_area=self._min_area,
-                                  resolution=self._resolution,
-                                  excl_area=self._excl_area,
-                                  gen_index=gen_index)
+            agg = self.run_serial(
+                self._excl_fpath,
+                h5_fpath,
+                self._tm_dset,
+                *self._agg_dsets,
+                agg_method=agg_method,
+                excl_dict=self._excl_dict,
+                gids=self.gids,
+                inclusion_mask=self._inclusion_mask,
+                area_filter_kernel=self._area_filter_kernel,
+                min_area=self._min_area,
+                resolution=self._resolution,
+                excl_area=self._excl_area,
+                gen_index=gen_index,
+            )
        else:
-            agg = self.run_parallel(h5_fpath=h5_fpath,
-                                    agg_method=agg_method,
-                                    excl_area=self._excl_area,
-                                    max_workers=max_workers,
-                                    sites_per_worker=sites_per_worker)
-
-        if not agg['meta']:
-            e = ('Supply curve aggregation found no non-excluded SC points. '
-                 'Please check your exclusions or subset SC GID selection.')
+            agg = self.run_parallel(
+                h5_fpath=h5_fpath,
+                agg_method=agg_method,
+                excl_area=self._excl_area,
+                max_workers=max_workers,
+                sites_per_worker=sites_per_worker,
+            )
+
+        if not agg["meta"]:
+            e = (
+                "Supply curve aggregation found no non-excluded SC points. "
+                "Please check your exclusions or subset SC GID selection."
+            )
            logger.error(e)
            raise EmptySupplyCurvePointError(e)
 
        for k, v in agg.items():
-            if k == 'meta':
+            if k == "meta":
                v = pd.concat(v, axis=1).T
-                v = v.sort_values('sc_point_gid')
+                v = v.sort_values(SupplyCurveField.SC_POINT_GID)
                v = v.reset_index(drop=True)
-                v.index.name = 'sc_gid'
+                v.index.name = SupplyCurveField.SC_GID
                agg[k] = v
            else:
                v = np.dstack(v)[0]
@@ -821,7 +951,7 @@ class Aggregation(BaseAggregation):
            Aggregated values for each aggregation dataset
        """
        agg_out = aggregation.copy()
-        meta = agg_out.pop('meta').reset_index()
+        meta = agg_out.pop("meta").reset_index()
        for c in meta.columns:
            try:
                meta[c] = pd.to_numeric(meta[c])
@@ -840,7 +970,7 @@ class Aggregation(BaseAggregation):
                shape = data.shape
                shapes[dset] = shape
                if len(data.shape) == 2:
-                    if ('time_index' in f) and (shape[0] == f.shape[0]):
+                    if ("time_index" in f) and (shape[0] == f.shape[0]):
                        if time_index is None:
                            time_index = f.time_index
 
@@ -849,19 +979,40 @@ class Aggregation(BaseAggregation):
                chunks[dset] = chunk
                dtypes[dset] = dtype
 
-        Outputs.init_h5(out_fpath, dsets, shapes, attrs, chunks, dtypes,
-                        meta, time_index=time_index)
-
-        with Outputs(out_fpath, mode='a') as out:
+        Outputs.init_h5(
+            out_fpath,
+            dsets,
+            shapes,
+            attrs,
+            chunks,
+            dtypes,
+            meta,
+            time_index=time_index,
+        )
+
+        with Outputs(out_fpath, mode="a") as out:
            for dset, data in agg_out.items():
                out[dset] = data
 
     @classmethod
-    def run(cls, excl_fpath, h5_fpath, tm_dset, *agg_dset,
-            excl_dict=None, area_filter_kernel='queen', min_area=None,
-            resolution=64, excl_area=None, gids=None,
-            pre_extract_inclusions=False, agg_method='mean', max_workers=None,
-            sites_per_worker=100, out_fpath=None):
+    def run(
+        cls,
+        excl_fpath,
+        h5_fpath,
+        tm_dset,
+        *agg_dset,
+        excl_dict=None,
+        area_filter_kernel="queen",
+        min_area=None,
+        resolution=64,
+        excl_area=None,
+        gids=None,
+        pre_extract_inclusions=False,
+        agg_method="mean",
+        max_workers=None,
+        sites_per_worker=100,
+        out_fpath=None,
+    ):
         """Get the supply curve points aggregation summary.
 
         Parameters
@@ -923,15 +1074,25 @@ class Aggregation(BaseAggregation):
            Aggregated values for each aggregation dataset
        """
 
-        agg = cls(excl_fpath, tm_dset, *agg_dset,
-                  excl_dict=excl_dict, area_filter_kernel=area_filter_kernel,
-                  min_area=min_area, resolution=resolution,
-                  excl_area=excl_area, gids=gids,
-                  pre_extract_inclusions=pre_extract_inclusions)
-
-        aggregation = agg.aggregate(h5_fpath=h5_fpath, agg_method=agg_method,
-                                    max_workers=max_workers,
-                                    sites_per_worker=sites_per_worker)
+        agg = cls(
+            excl_fpath,
+            tm_dset,
+            *agg_dset,
+            excl_dict=excl_dict,
+            area_filter_kernel=area_filter_kernel,
+            min_area=min_area,
+            resolution=resolution,
+            excl_area=excl_area,
+            gids=gids,
+            pre_extract_inclusions=pre_extract_inclusions,
+        )
+
+        aggregation = agg.aggregate(
+            h5_fpath=h5_fpath,
+            agg_method=agg_method,
+            max_workers=max_workers,
+            sites_per_worker=sites_per_worker,
+        )
 
        if out_fpath is not None:
            agg.save_agg_to_h5(h5_fpath, out_fpath, aggregation)
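
For orientation, a hypothetical invocation of the Aggregation.run() classmethod reformatted above, assuming it returns the dictionary produced by aggregate() (the tail of the method is truncated above). All file paths, the exclusion layer name, and the dataset names are placeholders, not files shipped with reV:

from reV.supply_curve.aggregation import Aggregation

agg_out = Aggregation.run(
    "exclusions.h5",     # excl_fpath: exclusion layers file (placeholder)
    "gen_output.h5",     # h5_fpath: reV output h5 to aggregate (placeholder)
    "techmap_wtk",       # tm_dset: techmap dataset in excl_fpath (placeholder)
    "cf_mean",           # *agg_dset: dataset(s) to aggregate to SC points
    excl_dict={"slope": {"inclusion_range": (None, 5)}},  # placeholder layer
    resolution=64,       # SC point size in exclusion pixels (default)
    agg_method="mean",
    max_workers=1,       # 1 dispatches run_serial; otherwise run_parallel
    out_fpath="agg.h5",  # optional; also writes results via save_agg_to_h5
)

meta = agg_out["meta"]  # DataFrame indexed by SupplyCurveField.SC_GID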