hspf 2.1.2__py3-none-any.whl → 2.1.3__py3-none-any.whl
- hspf/hbn.py +149 -27
- hspf/hspfModel.py +47 -8
- hspf/parser/graph.py +2 -1
- hspf/reports.py +320 -132
- hspf/uci.py +16 -5
- {hspf-2.1.2.dist-info → hspf-2.1.3.dist-info}/METADATA +1 -1
- {hspf-2.1.2.dist-info → hspf-2.1.3.dist-info}/RECORD +8 -8
- {hspf-2.1.2.dist-info → hspf-2.1.3.dist-info}/WHEEL +0 -0
hspf/hbn.py
CHANGED

@@ -6,7 +6,7 @@ nutrients relevant for our current calibration methods. (See calibration_helpers
 
 @author: mfratki
 """
-from
+from hspf import helpers
 import pandas as pd
 import math
 from struct import unpack
@@ -14,6 +14,7 @@ from numpy import fromfile
 from pandas import DataFrame
 from datetime import datetime, timedelta #, timezone
 from collections import defaultdict
+from collections.abc import MutableMapping
 #from pathlib import Path
 
 
@@ -189,11 +190,30 @@ class hbnInterface:
     def _clear_cache(self):
         [hbn._clear_cache() for hbn in self.hbns]
 
+
+
     def get_time_series(self, t_opn, t_cons, t_code, opnid, activity = None):
-
+        df = pd.concat([hbn._get_time_series(t_opn, t_cons, t_code, opnid, activity) for hbn in self.hbns],axis = 1)
+        if df.empty:
+            raise ValueError(f"No data found for {t_opn} {t_cons} {t_code} {opnid} {activity}")
+
+        if long_format:
+            df = df.reset_index().melt(id_vars = ['index'],var_name = 'OPNID',value_name = t_con)
+            df.rename(columns = {'index':'datetime'},inplace = True)
+            df['OPERATION'] = t_opn
+        return df
+
+    def get_multiple_timeseries(self,t_opn,t_code,t_con,opnids = None,activity = None,axis = 1,long_format = False):
+        df = pd.concat([hbn._get_multiple_timeseries(t_opn,t_code,t_con,opnids,activity) for hbn in self.hbns],axis = 1)
+        if df.empty:
+            raise ValueError(f"No data found for {t_opn} {t_con} {t_code} {opnids} {activity}")
 
-
-
+        if long_format:
+            df = df.reset_index().melt(id_vars = ['index'],var_name = 'OPNID',value_name = 'value')
+            df.rename(columns = {'index':'datetime'},inplace = True)
+            df['TIMESERIES'] = t_con
+            df['OPERATION'] = t_opn
+        return df
 
     def get_perlnd_constituent(self,constituent,perlnd_ids = None,time_step = 5):
         return get_simulated_perlnd_constituent(self,constituent,time_step)
@@ -217,13 +237,33 @@ class hbnInterface:
         # for dic in dics:
         #     for key, vals in dic.items():
         #         [dd[key].append(val) for val in vals]
-        dd = defaultdict(set)
+        # dd = defaultdict(set)
         dics = [hbn.output_names() for hbn in self.hbns]
+        return merge_dicts(dics)
+        # for dic in dics:
+        #     for operation, vals in dic.items():
+        #         for activity,v in vals.items():
+        #             [dd[operation][activity].add(t) for t in v]
+        # return dd
+
+    def _timeseries(self):
+        mapn = self._mapn()
+        timeseries = []
+        for key, vals in mapn.items():
+            _key = list(key)
+            for val in vals:
+                timeseries.append(_key + [val])
+        return timeseries
+
+
+    def _mapn(self):
+        dd = defaultdict(set)
+        dics = [hbn.mapn for hbn in self.hbns]
         for dic in dics:
             for key, vals in dic.items():
                 [dd[key].add(val) for val in vals]
-        return dd
-
+        return dd
+
 
     def get_perlnd_data(self,constituent,t_code = 'yearly'):
         t_cons = helpers.get_tcons(constituent,'PERLND')
@@ -236,14 +276,13 @@ class hbnInterface:
         return df
 
 
-    def
+    def get_rchres_output(self,constituent,units = 'mg/l',t_code = 5):
         '''
         Convience function for accessing the hbn time series associated with our current
         calibration method. Assumes you are summing across all dataframes.
         '''
-
-        df =
-        df.columns = reach_ids
+        t_cons = helpers.get_tcons(constituent,'RCHRES',units)
+        df = sum([self.get_multiple_timeseries('RCHRES',t_code,t_con) for t_con in t_cons])
         df.attrs['unit'] = units
         df.attrs['constituent'] = constituent
         return df
@@ -399,19 +438,27 @@ class hbnClass:
     def infer_opnids(self,t_opn, t_cons,activity):
         result = [k[-2] for k,v in self.mapn.items() if (t_cons in v) & (k[0] == t_opn) & (k[-1] == activity)]
         if len(result) == 0:
-
+            result = [-1]
+            # return print('No Constituent-OPNID relationship found')
         return result
 
 
     def infer_activity(self,t_opn, t_cons):
         result = [k[-1] for k,v in self.mapn.items() if (t_cons in v) & (k[0] == t_opn)]
         if len(result) == 0:
-
-
-
-
+            result = ''
+        else:# return print('No Constituent-Activity relationship found')
+            assert(len(set(result)) == 1)
+            result = result[0]
        return result
 
     def get_time_series(self, t_opn, t_cons, t_code, opnid, activity = None):
+        df = self._get_time_series(t_opn, t_cons, t_code, opnid, activity)
+        if df.empty:
+            raise ValueError(f"No data found for {t_opn} {t_cons} {t_code} {opnid} {activity}")
+        return df
+
+    def _get_time_series(self, t_opn, t_cons, t_code, opnid, activity = None):
         """
         get a single time series based on:
        1. t_opn: RCHRES, IMPLND, PERLND
@@ -420,13 +467,15 @@ class hbnClass:
        4. t_activity: HYDR, IQUAL, etc
        5. time_unit: yearly, monthly, full (default is 'full' simulation duration)
         """
+
+
         if isinstance(t_code,str):
             t_code = self.tcodes[t_code]
 
         if activity is None:
             activity = self.infer_activity(t_opn,t_cons)
-
-
+
+
         summaryindx = f'{t_opn}_{activity}_{opnid:03d}_{t_code}'
         if summaryindx in self.summaryindx:
             df = self.data_frames[summaryindx][t_cons].copy()
@@ -438,25 +487,31 @@ class hbnClass:
             #df.index = df.index.shift(-1,TCODES2FREQ[t_code])
             df = df[df.index >= '1996-01-01']
         else:
-            df =
+            df = pd.DataFrame()
 
         return df
+
     def get_multiple_timeseries(self,t_opn,t_code,t_con,opnids = None,activity = None):
+        df = self._get_multiple_timeseries(t_opn,t_code,t_con,opnids,activity)
+        if df.empty:
+            raise ValueError(f"No data found for {t_opn} {t_con} {t_code} {opnids} {activity}")
+        return df
+
+    def _get_multiple_timeseries(self,t_opn,t_code,t_con,opnids = None,activity = None):
         # a single constituent but multiple opnids
+
+
         if isinstance(t_code,str):
             t_code = self.tcodes[t_code]
 
         if activity is None:
-            activity = self.infer_activity(t_opn,t_con)
-
-            return None
-
+            activity = self.infer_activity(t_opn,t_con)
+
         if opnids is None:
             opnids = self.infer_opnids(t_opn,t_con,activity)
-
-            return None
+
 
-        df =
+        df = pd.DataFrame()
         frames = []
         mapd_list = list(self.mapd.keys())
         for opnid in opnids:
@@ -475,9 +530,76 @@ class hbnClass:
             dic[activity] = set([item for sublist in t_cons for item in sublist])
         return dic
 
+
+    def output_names(self):
+
+        activities = []
+        operations = []
+        for k, v in self.mapn.items():
+            operations.append(k[0])
+            activities.append(k[-1])
+
+        operations = set(operations)
+        activities = set(activities)
+        #activities = set([k[-1] for k,v in self.mapn.items()])
+
+        dic = {}
+        for operation in operations:
+            acitivities = set([k[-1] for k,v in self.mapn.items() if k[0] == operation])
+            dic[operation] = {}
+            for activity in acitivities:
+                t_cons = [v for k,v in self.mapn.items() if (k[0] == operation) & (k[-1] == activity)]
+                dic[operation][activity] = set([item for sublist in t_cons for item in sublist])
+        # for activity in activities:
+        #     t_cons = [v for k,v in self.mapn.items() if k[-1] == activity]
+        #     dic[activity] = set([item for sublist in t_cons for item in sublist])
+        return dic
+
+    def get_timeseries(self):
+        mapn = self.mapn
+        timeseries = []
+        for key, vals in mapn.items():
+            _key = list(key)
+            for val in vals:
+                timeseries.append(_key + [val])
+        return timeseries
+
     @staticmethod
     def get_perlands(summary_indxs):
         perlands = [int(summary_indx.split('_')[-2]) for summary_indx in summary_indxs]
         return perlands
 
-
+
+def merge_dicts(dicts):
+    """
+    Merge a list of dictionaries into a single dictionary, combining sets
+    at the leaf level and properly merging nested dictionaries.
+
+    Args:
+        dicts (list): A list of dictionaries to merge.
+
+    Returns:
+        dict: The merged dictionary.
+    """
+    def recursive_merge(d1, d2):
+        for key, value in d2.items():
+            if key in d1:
+                # If the value is a dictionary, recurse
+                if isinstance(d1[key], MutableMapping) and isinstance(value, MutableMapping):
+                    recursive_merge(d1[key], value)
+                # If the value is a set, merge the sets
+                elif isinstance(d1[key], set) and isinstance(value, set):
+                    d1[key].update(value)
+                else:
+                    raise ValueError(f"Incompatible types for key '{key}': {type(d1[key])} vs {type(value)}")
+            else:
+                # If the key does not exist in d1, copy it
+                d1[key] = value
+
+    # Start with an empty dictionary
+    merged_dict = {}
+
+    for d in dicts:
+        recursive_merge(merged_dict, d)
+
+    return merged_dict
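Note on the hbn.py changes above: output_names() on hbnInterface now delegates to the new merge_dicts helper, which walks nested mappings recursively and unions sets at the leaves. A minimal sketch of the intended behavior, using hypothetical operation/activity names (not taken from a real model):

    # Assumes merge_dicts as defined in hbn.py above; the data is illustrative only.
    a = {'PERLND': {'PWATER': {'PERO', 'SURO'}}}
    b = {'PERLND': {'PWATER': {'AGWO'}, 'SEDMNT': {'SOSED'}},
         'RCHRES': {'HYDR': {'ROVOL'}}}

    merged = merge_dicts([a, b])
    assert merged['PERLND']['PWATER'] == {'PERO', 'SURO', 'AGWO'}
    assert merged['RCHRES']['HYDR'] == {'ROVOL'}

One caveat worth noting: when a key is missing from the accumulator, the helper copies the value by reference, so a later merge can mutate one of the input dicts; callers that need isolation should deep-copy the inputs first.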
hspf/hspfModel.py
CHANGED

@@ -7,17 +7,19 @@ Created on Thu Oct 13 09:26:05 2022
 from pathlib import Path
 import os.path
 import subprocess
+import concurrent.futures
 
-from .uci import UCI
-from
-from .reports import Reports
-from .wdm import wdmInterface
-from
+from hspf.uci import UCI
+from hspf import hbn
+from hspf.reports import Reports
+from hspf.wdm import wdmInterface
+from hspf import wdmReader
 
 
 
 
+winHSPF = str(Path(__file__).resolve().parent) + '\\bin\\WinHSPFLt\\WinHspfLt.exe'
 
 
 # Only for accessing information regarding a specific uci_file
@@ -51,6 +53,15 @@ class hspfModel():
         # Compositions
         self.reports = Reports(self.uci,self.hbns,self.wdms)
 
+    def _reinitialize(self,uci_file:str,run_model:bool = False):
+        self.uci = UCI(uci_file)
+        self.validate_uci(run_model = run_model)
+        self.hbns = hbn.hbnInterface(self.hbn_paths)
+        try:
+            self.wdms = wdmInterface(self.wdm_paths)
+        except:
+            self.wdms = None
+        self.reports = Reports(self.uci,self.hbns,self.wdms)
 
     def validate_wdms(self):
         # Ensure wdm files exist and the folders for the other file types exist relative
@@ -92,15 +103,16 @@ class hspfModel():
         else:
             self.run_model()
 
-    def run_model(self,new_uci_file = None):
+    def run_model(self,new_uci_file = None,):
 
         if new_uci_file is None:
             new_uci_file = self.uci_file
 
         # new_uci_file = self.model_path.joinpath(uci_name)
         # self.uci.write(new_uci_file)
-
-        self.
+
+        subprocess.run([winHSPF,self.uci_file.as_posix()]) #, stdout=subprocess.PIPE, creationflags=0x08000000)
+        self._reinitialize(new_uci_file,run_model = False)
 
     def load_hbn(self,hbn_name):
         self.hbns[hbn_name] = hbn.hbnClass(self.uci_file.parent.joinpath(hbn_name).as_posix())
@@ -177,8 +189,35 @@ class hspfModel():
 
 
 
+def run_uci(uci_file:str, ):
+    """
+    convenience function to run a single model uci file.
+    """
+    print(f"Starting model: {uci_file}")
+    subprocess.run([winHSPF, uci_file])
+    print(f"Completed model: {uci_file}")
 
 
+def run_batch_files(file_list, max_concurrent=4):
+    """
+    Takes a list of .uci file paths and runs them N at a time.
+    """
+    # Create a pool of workers (threads)
+    with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrent) as executor:
+        # Submit all jobs to the pool
+        future_to_file = {
+            executor.submit(run_uci, uci_file): uci_file
+            for uci_file in file_list
+        }
+
+        # Monitor completion (optional, but good for error catching)
+        for future in concurrent.futures.as_completed(future_to_file):
+            uci_file = future_to_file[future]
+            try:
+                future.result()  # This will raise exceptions if run_uci failed
+            except Exception as exc:
+                print(f"File {uci_file} generated an exception: {exc}")
+
 
 # class runManager():
 #     def __init__()
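Note on the hspfModel.py changes above: run_batch_files bounds concurrency with a ThreadPoolExecutor, which is appropriate here because each worker spends its time blocked on a child process rather than executing Python bytecode. A self-contained sketch of the same pattern; the worker command and file names are stand-ins, not the module's real executable:

    import concurrent.futures
    import subprocess

    def run_one(path):
        # Each worker blocks on its own subprocess, so threads suffice despite the GIL.
        subprocess.run(['python', '-c', 'pass'], check=True)  # stand-in for WinHspfLt.exe <uci>
        return path

    files = ['model_a.uci', 'model_b.uci', 'model_c.uci']  # hypothetical paths
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        futures = {executor.submit(run_one, f): f for f in files}
        for future in concurrent.futures.as_completed(futures):
            try:
                print(f'finished {future.result()}')
            except Exception as exc:
                print(f'{futures[future]} failed: {exc}')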
hspf/parser/graph.py
CHANGED

@@ -635,7 +635,8 @@ class reachNetwork():
         areas = areas.groupby(['source_type','source_type_id','source_name'])['area'].sum()[['PERLND','IMPLND']]
 
         if group:
-            areas =
+            areas = areas.groupby(['source_type','source_name']).sum()
+            #areas = pd.concat([areas[operation].groupby('source_name').sum() for operation in ['PERLND','IMPLND']])
         #areas = pd.concat([areas[operation].groupby(self.uci.opnid_dict[operation].loc[areas[operation].index,'LSID'].values).sum() for operation in ['PERLND','IMPLND']])
         return areas
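Note on the graph.py change above: the group=True branch now collapses the per-operation id level of the drainage-area index, keeping (source_type, source_name) totals. A small sketch with made-up areas showing what that groupby does:

    import pandas as pd

    # Hypothetical areas indexed the way reachNetwork builds them.
    idx = pd.MultiIndex.from_tuples(
        [('PERLND', 101, 'Forest'), ('PERLND', 102, 'Forest'), ('IMPLND', 201, 'Urban')],
        names=['source_type', 'source_type_id', 'source_name'])
    areas = pd.Series([10.0, 5.0, 2.0], index=idx, name='area')

    # Same call as the new branch: sum over source_type_id within each type/name pair.
    print(areas.groupby(['source_type', 'source_name']).sum())
    # IMPLND/Urban -> 2.0, PERLND/Forest -> 15.0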
hspf/reports.py
CHANGED

@@ -25,8 +25,8 @@ class Reports():
         return scour(self.hbns,self.uci,start_year = start_year,end_year=end_year)
 
     # Hydrology Reports
-    def landcover_area(self):
-        return
+    def landcover_area(self,reach_ids,upstream_reach_ids = None):
+        return watershed_landcover_areas(self.uci,reach_ids,upstream_reach_ids)
 
     def annual_water_budget(self,operation):
         assert operation in ['PERLND','RCHRES','IMPLND']
@@ -36,10 +36,37 @@ class Reports():
             return annual_implnd_water_budget(self.uci,self.hbns)
         else:
             return annual_reach_water_budget(self.uci,self.hbns)
+
+    def annual_precip(self):
+        return avg_annual_precip(self.uci,self.wdms)
+
+    def simulated_et(self):
+        return simulated_et(self.uci,self.hbns)
+
+    def annual_perlnd_runoff(self,reach_ids = None,upstream_reach_ids = None,start_year = 1996,end_year = 2100):
+        if (reach_ids is None) and (upstream_reach_ids is None):
+            opnids = None
+        else:
+            opnids = self.uci.network.get_opnids('PERLND',reach_ids,upstream_reach_ids)
+        return annual_perlnd_runoff(self.uci,self.hbns,opnids,start_year,end_year)
+
+    #% Catchment and Watershed Loading (Edge of Field Load) Reports
+    #
+    def average_annual_catchment_loading(self,constituent,by_landcover = False,start_year = 1996,end_year = 2100):
+        return average_annual_catchment_loading(self.uci,self.hbns,constituent,by_landcover = by_landcover,start_year = start_year,end_year = end_year)
+
+    def average_monthly_catchment_loading(self,constituent,by_landcover = False,start_year = 1996,end_year = 2100):
+        return average_monthly_catchment_loading(self.uci,self.hbns,constituent,by_landcover = by_landcover,start_year = start_year,end_year = end_year)
+
+    def average_annual_watershed_loading(self,constituent,reach_ids,upstream_reach_ids = None, start_year = 1996, end_year = 2100, by_landcover = False,drainage_area = None):
+        return average_annual_watershed_loading(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids, start_year, end_year, by_landcover,drainage_area)
+
+    def average_monthly_watershed_loading(self,constituent,reach_ids,upstream_reach_ids = None, start_year = 1996, end_year = 2100, by_landcover = False,drainage_area = None):
+        return average_monthly_watershed_loading(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids, start_year, end_year, by_landcover,drainage_area)
 
     def watershed_loading(self,constituent,reach_ids,upstream_reach_ids = None,by_landcover = False):
         '''
-        Calculate the loading to channels from
+        Calculate the edge of field loading to channels from each catchment within the watershed defined by reach_ids and upstream_reach_ids.
 
         Parameters
         ----------
@@ -57,40 +84,40 @@ class Reports():
     def catchment_loading(self,constituent,by_landcover = False):
         return get_catchment_loading(self.uci,self.hbns,constituent,by_landcover)
 
+    # def average_annual_subwatershed_loading(self,constituent):
+    #     return ann_avg_subwatershed_loading(constituent,self.uci, self.hbns)
+
+    # def average_annual_watershed_loading(self,constituent,reach_ids):
+    #     landcovers = ann_avg_watershed_loading(constituent,reach_ids,self.uci, self.hbns, True)
+    #     total = ann_avg_watershed_loading(constituent,reach_ids,self.uci, self.hbns, False)
+    #     total.index = ['Total']
+    #     total = pd.concat([landcovers,total])
+    #     total['volume'] = total['area']*total[f'weighted_mean_{constituent}']
+    #     total['volume_percent'] = total['volume']/total.loc['Total','volume']*100
+    #     total['area_percent'] = total['area']/total.loc['Total','area']*100
+    #     total['share'] = total['volume_percent']/total['area_percent']
+    #     return total
+
     # Contributions Reports
     def contributions(self,constituent,target_reach_id):
         return total_contributions(constituent,self.uci,self.hbns,target_reach_id)
 
     def landcover_contributions(self,constituent,target_reach_id,landcover = None):
         return catchment_contributions(self.uci,self.hbns,constituent,target_reach_id)
 
-
-        return ann_avg_subwatershed_loading(constituent,self.uci, self.hbns)
-
-    def ann_avg_watershed_loading(self,constituent,reach_ids):
-        landcovers = ann_avg_watershed_loading(constituent,reach_ids,self.uci, self.hbns, True)
-        total = ann_avg_watershed_loading(constituent,reach_ids,self.uci, self.hbns, False)
-        total.index = ['Total']
-        total = pd.concat([landcovers,total])
-        total['volume'] = total['area']*total[f'weighted_mean_{constituent}']
-        total['volume_percent'] = total['volume']/total.loc['Total','volume']*100
-        total['area_percent'] = total['area']/total.loc['Total','area']*100
-        total['share'] = total['volume_percent']/total['area_percent']
-        return total
+    # Landscape Yield Reports
 
-    def
-        df=
+    def average_annual_yield(self,constituent,reach_ids,upstream_reach_ids = None,start_year = 1996,end_year = 2100):
+        df= average_annual_yield(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids,start_year,end_year)
+        return df
+
+    def average_monthly_yield(self,constituent,reach_ids,upstream_reach_ids = None,start_year = 1996,end_year = 2100):
+        df= average_monthly_yield(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids,start_year,end_year)
         return df
 
-
-        return avg_annual_precip(self.uci,self.wdms)
-
-    def simulated_et(self):
-        return simulated_et(self.uci,self.hbns)
-
-
+
 
 #%% Channel Reports
-def scour(hbn,uci,start_year =
+def scour(hbn,uci,start_year = 1996,end_year = 2030):
     # Should eventually create an entire reports module or class indorder to calculate all of the different model checks
     # TODO: Incorporate IMPLNDS
     schematic = uci.table('SCHEMATIC').copy()
@@ -103,7 +130,7 @@ def scour(hbn,uci,start_year = '1996',end_year = '2030'):
                                      activity = 'SEDMNT',
                                      t_code = 'yearly',
                                      opnids = None)
-    sosed = sosed.loc[(sosed.index
+    sosed = sosed.loc[(sosed.index.year >= start_year) & (sosed.index.year <= end_year)].mean().rename('mean').to_frame()
 
     sosld = hbn.get_multiple_timeseries(t_opn = 'IMPLND',
                                      t_con = 'SOSLD',
@@ -124,7 +151,7 @@ def scour(hbn,uci,start_year = '1996',end_year = '2030'):
     scour_report = []
     # schematic block will have all the possible perlands while sosed only has perlands that were simulated
     # in other words information from sosed is a subset of schematic
-    for tvolno in lakeflag.index: #schematic['TVOLNO'].unique():
+    for tvolno in lakeflag.index.intersection(uci.opnid_dict['RCHRES'].index): #schematic['TVOLNO'].unique():
         implnd_load = 0
         prlnd_load = 0
         reach_load = depscr.loc[tvolno].values[0]
@@ -171,43 +198,91 @@ def get_catchments(uci,reach_ids):
 
 
 
-#%%
+#%% Constituent Loads
 
-def
-
+def _constituent_load(hbn,constituent,time_step = 5):
+    #reach_ids = uci.network.G.nodes
+    if constituent == 'Q':
+        units = 'acrft'
+    else:
+        units = 'lb'
 
+    df = hbn.get_rchres_output(constituent,units,time_step)
 
-
-    hbn.get_rchres_data('TSS',reach_id,'lb','yearly').mean()*2000/uci.network.drainage_area(reach_id)
+    return df
 
-def
+def constituent_load(hbn,constituent,reach_ids,time_step = 5,upstream_reach_ids = None):
     #reach_ids = uci.network.G.nodes
-
-    reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
-    area = uci.network.drainage_area(reach_ids,upstream_reach_ids)
-
     if constituent == 'Q':
         units = 'acrft'
     else:
         units = 'lb'
+
+    upstream_load = 0
+    if upstream_reach_ids is not None:
+        upstream_load = constituent_load(hbn,constituent,upstream_reach_ids,time_step)
+
+    df = hbn.get_reach_constituent(constituent,reach_ids,time_step,unit =units) - upstream_load
+
+    return df
+
+#%% Landscape Yields
+
+def _constituent_yield(uci,hbn,constituent,time_step = 5):
+    df = _constituent_load(hbn,constituent,time_step)
+
+    areas = [uci.network.drainage_area([reach_id]) for reach_id in df.columns]
+    return df/areas
+
+def constituent_yield(uci,hbn,constituent,reach_ids,time_step = 5,upstream_reach_ids = None,drainage_area = None):
+    #reach_ids = uci.network.G.nodes
+
+    if drainage_area is None:
+        drainage_area = uci.network.drainage_area(reach_ids,upstream_reach_ids)
+
+    df = constituent_load(hbn,constituent,reach_ids,time_step,upstream_reach_ids)/drainage_area
 
-    df
+    return df
+
+def average_annual_yield(uci,hbn,constituent,reach_ids,upstream_reach_ids = None,start_year = 1996,end_year = 2100,drainage_area = None):
+    df = constituent_yield(uci,hbn,constituent,reach_ids,5,upstream_reach_ids,drainage_area)
+    df = df.loc[(df.index.year >= start_year) & (df.index.year <= end_year)].mean()
+    return df
+
+def average_monthly_yield(uci,hbn,constituent,reach_ids,upstream_reach_ids = None,start_year = 1996,end_year = 2100,drainage_area = None):
+    df = constituent_yield(uci,hbn,constituent,reach_ids,4,upstream_reach_ids,drainage_area)
+    df = df.loc[(df.index.year >= start_year) & (df.index.year <= end_year)]
+    df = df.groupby(df.index.month).mean()
+    return df
+
 
-    return df/area
 
-#%% Catchment and Watershed Loading
+#%% Catchment and Watershed Loading (Edge of Field Load)
 
-def
-    df = uci.network.
-    df['percent'] = 100*(df['
+def watershed_landcover_areas(uci,reach_ids,upstream_reach_ids = None):
+    df = uci.network.drainage_area_landcover(reach_ids,upstream_reach_ids,group=True).reset_index()
+    df['percent'] = 100*(df['area']/df['area'].sum())
     return df
 
+def catchment_landcover_areas(uci,reach_ids = None):
+    df = uci.network.subwatersheds().reset_index()[['TVOLNO','SVOL','LSID','AFACTR']]
+    df.rename(columns = {'AFACTR':'area',
+                         'TVOLNO':'catchment_id',
+                         'LSID':'landcover',
+                         'SVOL':'landcover_type'},inplace = True)
+    if reach_ids is not None:
+        df = df.loc[df['catchment_id'].isin(reach_ids)]
+    return df
+
+
+
 def catchment_areas(uci):
     df = uci.network.subwatersheds().reset_index()
     df = df.groupby('TVOLNO')['AFACTR'].sum().reset_index()
     df.rename(columns = {'AFACTR':'catchment_area'},inplace = True)
     return df
 
+
 def get_constituent_loading(uci,hbn,constituent,time_step = 5):
 
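Note on the yield helpers added above: average_annual_yield and average_monthly_yield both window a DatetimeIndex by calendar year and then reduce, either to a single mean or to month-of-year means. A minimal sketch of that windowing on synthetic data (the column name is hypothetical):

    import numpy as np
    import pandas as pd

    idx = pd.date_range('1995-01-01', periods=48, freq='MS')  # hypothetical monthly series
    df = pd.DataFrame({'R001': np.arange(48, dtype=float)}, index=idx)

    window = df.loc[(df.index.year >= 1996) & (df.index.year <= 2100)]
    annual = window.mean()                               # average_annual_yield-style reduction
    monthly = window.groupby(window.index.month).mean()  # average_monthly_yield-style, index 1..12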
@@ -237,42 +312,170 @@ def get_constituent_loading(uci,hbn,constituent,time_step = 5):
     # df['time_step'] = time_step
     # df['year'] = pd.DatetimeIndex(df['datetime']).year
     # df['month'] = pd.DatetimeIndex(df['datetime']).month
+    return df
+
+
+    # subwatersheds = uci.network.subwatersheds().reset_index()
+    # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
+    # areas = catchment_areas(uci)
+
+    # df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='right')
+    # df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
+    # df['load'] = df['value']*df['AFACTR']
+    # df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
+    # df['constituent'] = constituent
+    # return df[['index','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
+def _join_catchments(df,uci,constituent):
+    subwatersheds = uci.network.subwatersheds().reset_index()
+    subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
+    areas = catchment_areas(uci)
+
+    df = pd.merge(
+        subwatersheds, df,
+        left_on=['SVOL', 'SVOLNO'],
+        right_on=['OPERATION', 'OPNID'],
+        how='inner'
+    )
+    df = pd.merge(df, areas, on='TVOLNO', how='left')
+
+    df['load'] = df['value'] * df['AFACTR']
+    df = df.rename(columns={
+        'value': 'loading_rate',
+        'AFACTR': 'landcover_area',
+        'LSID': 'landcover'
+    })
+    df['constituent'] = constituent
+    return df
+
+def get_catchment_loading(uci,hbn,constituent,time_step=5,by_landcover = False):
+    df = get_constituent_loading(uci,hbn,constituent,time_step)
+
+    subwatersheds = uci.network.subwatersheds().reset_index()
+    subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
+    areas = catchment_areas(uci)
+
+
+    df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='inner')
+    df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
+    df['load'] = df['value']*df['AFACTR']
+    df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
+    df['constituent'] = constituent
+    df = df[['index','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
+    return df
+
 
+def get_watershed_loading(uci,hbn,reach_ids,constituent,upstream_reach_ids = None,by_landcover = False,time_step = 5):
+    '''
+    Edge of field loading for all catchments within a watershed defined by reach_ids and upstream_reach_ids
+
+
+    '''
+    reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
+
+    df = get_catchment_loading(uci,hbn,constituent,time_step)
+    df = df.loc[df['TVOLNO'].isin(reach_ids)]
+    return df
+
+
+def average_annual_constituent_loading(uci,hbn,constituent,start_year = 1996,end_year = 2100):
+    df = get_constituent_loading(uci,hbn,constituent,time_step=5)
+    df = df.loc[(df['index'].dt.year >= start_year) & (df['index'].dt.year <= end_year)]
+    df['year'] = df['index'].dt.year
+    df = df.groupby(['OPERATION','OPNID'])['value'].mean().reset_index()
+    return df
+
+def average_monthly_constituent_loading(uci,hbn,constituent,start_year = 1996,end_year = 2100):
+    df = get_constituent_loading(uci,hbn,constituent,time_step=4)
+    df = df.loc[(df['index'].dt.year >= start_year) & (df['index'].dt.year <= end_year)]
+    df['month'] = df['index'].dt.month
+    df = df.groupby(['month','OPERATION','OPNID'])['value'].mean().reset_index()
+    return df
+
+def average_annual_catchment_loading(uci,hbn,constituent,start_year = 1996,end_year = 2100,by_landcover = False):
+    df = average_annual_constituent_loading(uci,hbn,constituent,start_year,end_year)
 
     subwatersheds = uci.network.subwatersheds().reset_index()
     subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
     areas = catchment_areas(uci)
 
-
+
+    df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='inner')
     df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
     df['load'] = df['value']*df['AFACTR']
     df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
     df['constituent'] = constituent
-
+    df = df[['constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
 
 
-
-
-    if not by_landcover:
-        df = df.groupby(['TVOLNO','constituent'])[['landcover_area','load']].sum().reset_index()
+    if by_landcover:
+        df = df.groupby(['TVOLNO','landcover','constituent'])[['landcover_area','load']].sum().reset_index()
         df['loading_rate'] = df['load']/df['landcover_area']
+    else:
+        df = df.groupby(['TVOLNO','constituent','catchment_area'])[['load']].sum().reset_index()
+        df['loading_rate'] = df['load']/df['catchment_area']
     return df
 
-def
-
+def average_monthly_catchment_loading(uci,hbn,constituent,start_year = 1996,end_year = 2100,by_landcover = False):
+    df = average_monthly_constituent_loading(uci,hbn,constituent,start_year,end_year)
+
+    subwatersheds = uci.network.subwatersheds().reset_index()
+    subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
+    areas = catchment_areas(uci)
 
-
+
+    df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='inner')
+    df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
+    df['load'] = df['value']*df['AFACTR']
+    df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
+    df['constituent'] = constituent
+    df = df[['month','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
+
+
+    if by_landcover:
+        df = df.groupby(['month','TVOLNO','landcover','constituent'])[['landcover_area','load']].sum().reset_index()
+        df['loading_rate'] = df['load']/df['landcover_area']
+    else:
+        df = df.groupby(['month','TVOLNO','constituent','catchment_area'])[['load']].sum().reset_index()
+        df['loading_rate'] = df['load']/df['catchment_area']
+    return df
+
+
+
+def average_annual_watershed_loading(uci,hbn,constituent,reach_ids, upstream_reach_ids = None, start_year = 1996, end_year = 2100, by_landcover = False,drainage_area = None):
+    df = average_annual_catchment_loading(uci,hbn,constituent,by_landcover = by_landcover,start_year = start_year,end_year = end_year)
+    reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
     df = df.loc[df['TVOLNO'].isin(reach_ids)]
+    if drainage_area is None:
+        df['watershed_area'] = uci.network.drainage_area(reach_ids,upstream_reach_ids) #df.drop_duplicates(subset=['TVOLNO'])['catchment_area'].sum()
+    else:
+        df['watershed_area'] = drainage_area
 
     if by_landcover:
         df = df.groupby(['landcover','constituent'])[['landcover_area','load']].sum().reset_index()
         df['loading_rate'] = df['load']/df['landcover_area']
     else:
-        df = df.groupby(['constituent'])[['
-        df['loading_rate'] = df['load']/df['
+        df = df.groupby(['constituent','watershed_area'])[['load']].sum().reset_index()
+        df['loading_rate'] = df['load']/df['watershed_area']
 
     return df
 
+def average_monthly_watershed_loading(uci,hbn,constituent,reach_ids, upstream_reach_ids = None, start_year = 1996, end_year = 2100, by_landcover = False,drainage_area = None):
+    df = average_monthly_catchment_loading(uci,hbn,constituent,by_landcover = by_landcover,start_year = start_year,end_year = end_year)
+    reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
+    df = df.loc[df['TVOLNO'].isin(reach_ids)]
+    if drainage_area is None:
+        df['watershed_area'] = uci.network.drainage_area(reach_ids,upstream_reach_ids) #df.drop_duplicates(subset=['TVOLNO'])['catchment_area'].sum()
+    else:
+        df['watershed_area'] = drainage_area
+
+    if by_landcover:
+        df = df.groupby(['month','TVOLNO','landcover','constituent'])[['landcover_area','load']].sum().reset_index()
+        df['loading_rate'] = df['load']/df['landcover_area']
+    else:
+        df = df.groupby(['month','constituent','watershed_area'])['load'].sum().reset_index()
+        df['loading_rate'] = df['load']/df['watershed_area']
+    return df
 
 #%% Contributions
 allocation_selector = {'Q': {'input': ['IVOL'],
@@ -427,6 +630,7 @@ def weighted_describe(df, value_col, weight_col):
 # })
 
 
+
 def monthly_avg_constituent_loading(constituent,uci,hbn):
     dfs = []
     for t_opn in ['PERLND','IMPLND']:
@@ -729,9 +933,27 @@ def avg_annual_precip(uci,wdm):
 
 #%%
 #%%% Other Reports
+# ts_names = ['PRECIP','PERO','AGWO','IFWO','SURO']
+# operation = 'PERLND'
+# t_code = 4
+# df = pd.concat([hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids) for ts_name in ts_names],axis =0)
+# df = pd.merge(df,subwatershed.reset_index(),left_on = ['OPERATION','OPNID'],right_on = ['SVOL','SVOLNO'],how='inner')
+# df[['datetime','TVOLNO','OPERATION','OPNID','LSID','AFACTR','TIMESERIES','value']]
 
+# grouping_columns = ['TVOLNO','LSID','TIMESERIES']
+# df.groupby[grouping_columns]
 
+def weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnids = None):
+    df = hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids)
 
+    subwatersheds = uci.network.subwatersheds().reset_index()
+    subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation]
+
+
+    df = pd.merge(subwatersheds,df,left_on = 'SVOLNO', right_on='SVOLNO',how='left')
+    df = weighted_mean(df,ts_name,'AFACTR')
+    df = df.set_index([df.index,'AFACTR'])
+    return df
 
 def weighted_mean(df,value_col,weight_col):
     weighted_mean = (df[value_col] * df[weight_col]).sum() / df[weight_col].sum()
@@ -739,9 +961,11 @@ def weighted_mean(df,value_col,weight_col):
                        'AFACTR' : df[weight_col].sum(),
                        value_col: [weighted_mean]})
 
-def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnids = None,group_by = None):
+def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnids = None,group_by = None,start_year = 1996,end_year = 2100):
     assert (group_by in [None,'landcover','opnid'])
-    df = hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids)
+    df = hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids)
+    df = df.loc[(df.index.year >= start_year) & (df.index.year <= end_year)]
+    df = df.mean().reset_index()
     df.columns = ['SVOLNO',ts_name]
     subwatersheds = uci.network.subwatersheds().reset_index()
     subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation]
@@ -761,41 +985,41 @@ def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnid
     return df
 
 
-def monthly_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None, as_rate = False, by_landcover = True, months = [1,2,3,4,5,6,7,8,9,10,11,12]):
-
-
+# def monthly_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None, as_rate = False, by_landcover = True, months = [1,2,3,4,5,6,7,8,9,10,11,12]):
+#     df = hbn.get_multiple_timeseries(operation,4,ts_name,opnids = opnids)
+#     df = df.loc[df.index.month.isin(months)]
 
-
-
-
+#     areas = uci.network.operation_area(operation)
+#     areas.loc[areas.index.intersection(df.columns)]
+#     df = df[areas.index.intersection(df.columns)]
 
-
+#     df = (df.groupby(df.index.month).mean()*areas['AFACTR'])
 
-
-
-
-
-
-
-
+#     if by_landcover:
+#         df = df.T.groupby(areas['LSID']).sum().T
+#         if as_rate:
+#             df = df/areas['AFACTR'].groupby(areas['LSID']).sum().to_list()
+#     else:
+#         if as_rate:
+#             df = df/areas['AFACTR'].sum()
 
-
+#     df.columns.name = ts_name
 
-
+#     return df
 
-def monthly_perlnd_runoff(uci,hbn):
-
-
-
-
-
-
-
+# def monthly_perlnd_runoff(uci,hbn):
+#     ts_names = ['PRECIP','PERO','AGWO','IFWO','SURO']
+#     df = pd.concat({ts_name:monthly_weighted_output(uci,hbn,ts_name,by_landcover=True,as_rate=True) for ts_name in ts_names},keys =ts_names)
+#     suro_perc = (df.loc['SURO']/df.loc['PERO'])*100
+#     suro_perc = suro_perc.reset_index()
+#     suro_perc['name'] = 'SURO_perc'
+#     suro_perc = suro_perc.set_index(['name','index'])
+#     return pd.concat([df,suro_perc])
 
 
-def annual_perlnd_runoff(uci,hbn):
+def annual_perlnd_runoff(uci,hbn,opnids = None,start_year=1996,end_year=2100):
     ts_names = ['PRECIP','PERO','AGWO','IFWO','SURO']
-    df = pd.concat([annual_weighted_output(uci,hbn,ts_name,group_by='landcover') for ts_name in ts_names],axis = 1)
+    df = pd.concat([annual_weighted_output(uci,hbn,ts_name,group_by='landcover',opnids=opnids,start_year=start_year,end_year=end_year) for ts_name in ts_names],axis = 1)
     df.columns = ts_names
     df['suro_perc'] = (df['SURO']/df['PERO'])*100
     return df
@@ -860,48 +1084,6 @@ def annual_sediment_budget(uci,hbn):
     df.columns = ['Sediment','Percentage']
     return df
 
-# def annual_loading_rate():
-
-
-# def annual_yield(uci,hbn,constituent):
-
-
-def subwatershed_weighted_output(uci,hbn,reach_ids,ts_name,time_step,by_landcover=False,as_rate = True):
-    subwatersheds = uci.network.subwatersheds(reach_ids)
-    subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND']
-
-    areas = subwatersheds[['SVOLNO','AFACTR']].set_index('SVOLNO')
-    areas = areas.join( uci.table('PERLND','GEN-INFO')['LSID'])
-    opnids = subwatersheds['SVOLNO'].to_list()
-
-    df = hbn.get_multiple_timeseries('PERLND',time_step,ts_name,opnids = opnids)
-
-    areas.loc[areas.index.intersection(df.columns)]
-    df = df[areas.index.intersection(df.columns)]
-
-    if by_landcover:
-        df = (df*areas['AFACTR'].values).T.groupby(areas['LSID']).sum()
-        if as_rate:
-            df = df.T/areas['AFACTR'].groupby(areas['LSID']).sum().to_list()
-        df.columns.name = ts_name
-    else:
-        df = (df * areas['AFACTR'].values).sum(axis=1)
-        if as_rate:
-            df = df/areas['AFACTR'].sum()
-        df.name = ts_name
-
-    return df
-
-
-
-
-
-# def perlnd_water_budget(uci,hbn,time_step = 5):
-
-#     ts_names = ['SUPY','SURO','IFWO','AGWO','PERO','AGWI','IGWI','PET','UZET','LZET','AGWET','BASET','TAET']
-#     dfs = [area_weighted_output(uci,hbn,ts_name,time_step) for ts_name in ts_names]
-
 
 
 #%% Phosphorous Loading Calculations
@@ -946,11 +1128,15 @@ def total_phosphorous(uci,hbn,t_code,operation = 'PERLND'):
     totals = []
     for mlno in opnids['MLNO'].unique():
         total = dissolved_orthophosphate(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate(uci,hbn,operation,mlno, t_code) + organic_refactory_phosphorous(uci,hbn,operation,mlno,t_code) + labile_oxygen_demand(uci,hbn,operation,mlno,t_code)*0.007326 # Conversation factor to P
-        if
-
-
-
+        if isinstance(total, (int, float)): #TODO fix for when no data is present. Don't like this workaround.
+            pass
+        elif not total.empty:
+            valid_opnids = total.columns.intersection(opnids['SVOLNO'].loc[opnids['MLNO'] == mlno])
+            totals.append(total[valid_opnids])
+
+    if len(totals) > 0:
+        total = pd.concat(totals,axis=1)
+        total = total.T.groupby(total.columns).sum().T
     return total
 
 
@@ -984,8 +1170,10 @@ def qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code
     masslink.fillna({'MFACTOR': 1}, inplace=True)
     ts = 0
     for index,row in masslink.iterrows():
-        hbn_name =
-        hbn_name
+        hbn_name = row['SMEMN']
+        if hbn_name in ['IOQUAL','SOQUAL','POQUAL','AOQUAL']:
+            qual_name = uci.table(operation,'QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
+            hbn_name = row['SMEMN'] + qual_name
         mfactor = row['MFACTOR']
         ts = hbn.get_multiple_timeseries(row['SVOL'],t_code,hbn_name)*mfactor + ts
     return ts
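Note on the reports.py changes above: weighted_output and annual_weighted_output both reduce PERLND output to an AFACTR-area-weighted mean, i.e. sum(value × area) / sum(area). Worked on hypothetical numbers:

    import pandas as pd

    df = pd.DataFrame({'PERO': [12.0, 8.0, 20.0],        # per-acre output
                       'AFACTR': [100.0, 300.0, 50.0]})  # contributing areas

    # Same arithmetic as reports.weighted_mean:
    w = (df['PERO'] * df['AFACTR']).sum() / df['AFACTR'].sum()
    print(w)  # (1200 + 2400 + 1000) / 450 ≈ 10.22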
hspf/uci.py
CHANGED

@@ -221,7 +221,7 @@ class UCI():
         for line in self.lines:
             the_file.write(line+'\n')
 
-    def add_parameter_template(self,block,table_name,table_id,parameter,tpl_char = '~'):
+    def add_parameter_template(self,block,table_name,table_id,parameter,tpl_char = '~',opnids = None,single_template = True, group_id = ''):
 
         table = self.table(block,table_name,0,False).reset_index()
         column_names,dtypes,starts,stops = self.uci[(block,table_name,table_id)]._delimiters()
@@ -229,20 +229,31 @@ class UCI():
         width = stops[column_names.index(parameter)] - starts[column_names.index(parameter)]
 
         ids = ~table[parameter].isna() # Handle comment lines in uci
+        parameter = parameter.lower()
+        if opnids is not None:
+            ids = ids & (table['OPNID'].isin(opnids))
 
         # Replace paramter name with PEST/PEST++ specification. Note this does not use the HSPF supplemental file so parameters are limited to width of uci file column
-
-
+        if single_template:
+            pest_param = group_id + parameter
+            template = tpl_char + pest_param + ' '*(width-len(pest_param)-2)+ tpl_char
+            pest_param = [pest_param]
+        else:
+            pest_param = group_id + parameter + table.loc[ids,'OPNID'].astype(str)
+            pest_param = pest_param.tolist()
+            template = [tpl_char + pest_param + ' '*(width-len(pest_param)-2)+ tpl_char for pest_param in pest_param]
+            #template = pest_param.apply(lambda name: tpl_char + name + ' '*(width-len(name)-1)+ tpl_char)
 
-        table.loc[ids,parameter] =
+        table.loc[ids,parameter.upper()] = template
         table = table.set_index('OPNID')
         self.replace_table(table,block,table_name,table_id)
+        return list(set(pest_param))
 
     def write_tpl(self,tpl_char = '~',new_tpl_path = None):
         if new_tpl_path is None:
             new_tpl_path = self.filepath.parent.joinpath(self.filepath.stem + '.tpl')
         self.merge_lines()
-        self.lines.insert(0,tpl_char)
+        self.lines.insert(0,'ptf ' + tpl_char)
         self._write(new_tpl_path)
 
     def write(self,new_uci_path):
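Note on the uci.py changes above: add_parameter_template now writes fixed-width PEST/PEST++ template fields, and write_tpl prepends the 'ptf ~' header line that PEST template files require. A sketch of the field layout with a hypothetical parameter name:

    tpl_char = '~'
    width = 10                # column width taken from the UCI table delimiters
    pest_param = 'grp1lzsn'   # hypothetical group_id + parameter

    # Delimiter, name, padding to the column width, closing delimiter:
    field = tpl_char + pest_param + ' ' * (width - len(pest_param) - 2) + tpl_char
    assert len(field) == width  # '~grp1lzsn~' exactly fills the 10-character column

    lines = ['PERLND', '  PWAT-PARM2']   # stand-in UCI lines
    lines.insert(0, 'ptf ' + tpl_char)   # header PEST expects at the top of a .tpl file
    print(lines[0])                      # 'ptf ~'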
{hspf-2.1.2.dist-info → hspf-2.1.3.dist-info}/RECORD
CHANGED

@@ -1,15 +1,15 @@
 hspf/Masslink_Timeseries.csv,sha256=TOV6PpR0SBI0FaAU1T-qyD2DyGsBFjUWZenvWXiS3wA,4985
 hspf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hspf/build_warehouse.py,sha256=y91vNmdt918H5_jx9nxMUVmRG9ORdChYWEqQbbi3YKs,22861
-hspf/hbn.py,sha256
+hspf/hbn.py,sha256=-db1mC8m7MetQGQ8T_UZZW6KkbVwrVkKE4ub9Ji6i9M,24133
 hspf/hbn2.py,sha256=OmuTVDxd0Boyd3GvBgzEfqvP7CTeYIJYPC7EXPgYu30,13190
 hspf/hbn_cy.c,sha256=ZIJwWxyGx8fE5nM1HBd8-zNotmStGZscqXijl3KSRdI,593464
 hspf/hbn_cy.html,sha256=o8wMdvEE547DRXcUHFPgAkkyJ665b6rloGL-qKClaTo,137735
 hspf/hbn_cy.pyx,sha256=T-itpkvHlxHzQHKtJBS-M8_ToLBa1U_ajpV53hh_oI8,4323
 hspf/helpers.py,sha256=Jv5lSUKVqML8iJOFfBPcA3bgHn9MbFZC4OO-9YHHP_w,3277
-hspf/hspfModel.py,sha256=
-hspf/reports.py,sha256=
-hspf/uci.py,sha256=
+hspf/hspfModel.py,sha256=Dpf0VHWAdQ7N932u6aYzXDzZCPrJbObW38tP0WRkG0E,10682
+hspf/reports.py,sha256=cDhjMFrUAzU7zzoX4VPyCJvgVLObtApEWTkK1vFsKfQ,57304
+hspf/uci.py,sha256=mi5lkt2bZlO0Lq4S7j3uZsLpHhS6dtdOIaO2Vi0ZNn8,34607
 hspf/validations.py,sha256=BcNT0h5QDZW9lHpXk8KuHQvycl8a_4jQ91srwWFodRo,6666
 hspf/warehouse.py,sha256=12Scro1a_hK5IvE9FENAMQv9jKWgtMYtAftJZ4p3nTQ,12239
 hspf/wdm.py,sha256=q0hNqsMNrTkxHtKEX0q0wWlIZabXv6UX2HjNCF9WEW4,12734
@@ -48,8 +48,8 @@ hspf/data/Timeseries Catalog/RCHRES/OXRX.txt,sha256=NWdRFpJ60LsYzCGHjt8Llay3OI8j
 hspf/data/Timeseries Catalog/RCHRES/PLANK.txt,sha256=0MAehIrF8leYQt0Po-9h6IiujzoWOlw-ADCV-bPiqs0,3508
 hspf/data/Timeseries Catalog/RCHRES/SEDTRN.txt,sha256=SiTgD4_YWctTgEfhoMymZfv8ay74xzCRdnI005dXjyE,659
 hspf/parser/__init__.py,sha256=2HvprGVCaJ9L-egvTj1MI-bekq5CNjtSBZfrCtQi3fs,92
-hspf/parser/graph.py,sha256=
+hspf/parser/graph.py,sha256=un5B7PiiaG9aWQHFeD1gHZtlWIIUnx4GG_-IER-4SmU,33119
 hspf/parser/parsers.py,sha256=x3othxQogUmGNe_ctCU20atDrRM_B4lEbVJb3EMbwto,20850
-hspf-2.1.
-hspf-2.1.
-hspf-2.1.
+hspf-2.1.3.dist-info/METADATA,sha256=U-tcOOkBzPIgs2oMbnSdXznyT4oTU2lxvnneVSC5vvk,605
+hspf-2.1.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+hspf-2.1.3.dist-info/RECORD,,

{hspf-2.1.2.dist-info → hspf-2.1.3.dist-info}/WHEEL
File without changes