pyhcal 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyhcal/__init__.py +7 -0
- pyhcal/calibrators.py +642 -0
- pyhcal/data/HUC_Names.csv +84 -0
- pyhcal/data/WISKI_EQUIS_XREF.csv +25789 -0
- pyhcal/data/stations_EQUIS.gpkg +0 -0
- pyhcal/data/stations_wiski.gpkg +0 -0
- pyhcal/figures.py +1024 -0
- pyhcal/metrics.py +485 -0
- pyhcal/modl_db.py +97 -0
- pyhcal/repository.py +98 -0
- pyhcal/setup_utils.py +573 -0
- pyhcal-1.0.0.dist-info/METADATA +15 -0
- pyhcal-1.0.0.dist-info/RECORD +14 -0
- pyhcal-1.0.0.dist-info/WHEEL +4 -0
pyhcal/__init__.py
ADDED
pyhcal/calibrators.py
ADDED
|
@@ -0,0 +1,642 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
"""
|
|
3
|
+
Created on Fri Dec 10 16:51:50 2021
|
|
4
|
+
|
|
5
|
+
@author: mfratki
|
|
6
|
+
"""
|
|
7
|
+
#standard imports
|
|
8
|
+
from copy import deepcopy
|
|
9
|
+
import subprocess
|
|
10
|
+
#non-standard imports
|
|
11
|
+
import pandas as pd
|
|
12
|
+
# Module-level pandas display configuration (import side effect).
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# to reset this
# NOTE(review): only max_columns is reset below, so max_rows stays unlimited
# for the rest of the session — confirm this asymmetry is intentional.
pd.reset_option('display.max_columns')
|
16
|
+
import numpy as np
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
|
|
19
|
+
#My packages
|
|
20
|
+
from pyhspf.hspfModel import hspfModel
|
|
21
|
+
from pyhspf.wdm import wdmInterface
|
|
22
|
+
from pyhspf import helpers
|
|
23
|
+
from mpcaHydro import data_manager as dm
|
|
24
|
+
from pyhcal import metrics
|
|
25
|
+
from pyhcal import figures
|
|
26
|
+
from pyhcal.setup_utils import Builder
|
|
27
|
+
#from hspf_tools.orm.monitoring_db import MonitoringDatabase
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class calProject():
    """Locate and open HSPF calibration projects under a root directory.

    A "project" is a subdirectory of ``project_location`` named after the
    model; opening one yields a :class:`calibrator` bound to that folder.
    """
    #valid_models = Builder.valid_models()

    def __init__(self, project_location):
        """Remember the root directory that holds calibration projects."""
        self.project_location = Path(project_location)

    def new_project(self, model_name):
        """Create a project scaffold for *model_name* via the Builder."""
        return Builder(model_name)  # self._builder.new_project(project_location, model_name)

    def load_project(self, model_name):
        """Open the calibration project for *model_name*.

        If no project folder exists, interactively offer to create one.
        Returns a :class:`calibrator`, or ``None`` if the user declines.
        """
        if model_name in [f.name for f in self.project_location.iterdir() if f.is_dir()]:
            return calibrator(self.project_location.joinpath(model_name))
        # BUG FIX: the original prompt read "set on up? (yes or no" (typo,
        # unclosed paren) and an unrecognized answer printed a hint but then
        # silently returned None — now we re-prompt until the answer is valid.
        while True:
            answer = input("No calibration project for that model. Would you like to set one up? (yes or no) ")
            if answer.lower() in ["y", "yes"]:
                self.new_project(model_name)
                return calibrator(self.project_location.joinpath(model_name))
            elif answer.lower() in ["n", "no"]:
                return None
            print('please enter yes or no')
|
+
|
|
53
|
+
|
|
54
|
+
def config_info(project_folder):
    """Build the default configuration dictionary for a project folder.

    Returns path entries derived from *project_folder* plus the default
    calibration date window.
    """
    path = Path(project_folder)
    return {
        'project_path': path,
        'project_name': path.name,
        'model_path': path.joinpath('model'),
        'output_path': path.joinpath('output'),
        'start_date': '1996-01-01',
        'end_date': '2100-01-01',
    }
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class calibrator:
|
|
69
|
+
def __init__(self,project_folder):
    """Bind a calibration project folder: resolve paths, load observed data,
    optional land-cover targets, and the project MODL_DB table.

    Parameters
    ----------
    project_folder : str or Path
        Root of the calibration project (contains 'model', 'output', 'data',
        optional 'targets.csv' and '<project>_MODL_DB.csv').
    """
    self.project_path = Path(project_folder)
    self.project_name = self.project_path.name
    self.model_path = self.project_path.joinpath('model')
    self.output_path = self.project_path.joinpath('output')
    # Current run number; set by load_model() when loading by integer.
    self.run = None
    #self.winHSPF = str(Path(__file__).resolve().parent.parent) + '\\bin\\WinHSPFLt\\WinHspfLt.exe'
    # Default analysis window; override via set_dates().
    self.start_date = '1996-01-01'
    self.end_date = '2100-01-01'

    # Load observational data into memory TODO: Convert to database?
    self.dm = dm.dataManager(self.project_path.joinpath('data'))
    #self.odm = MonitoringDatabase(cal.project_path.joinpath(cal.project_name))

    # Optional land-cover loading targets; None when targets.csv is absent.
    self.targets = None
    if self.project_path.joinpath('targets.csv').exists():
        self.targets = pd.read_csv(self.project_path.joinpath('targets.csv'))

    # NOTE(review): raises FileNotFoundError if '<project>_MODL_DB.csv' is
    # missing — presumably guaranteed by the project Builder; confirm.
    self.MODL_DB = pd.read_csv(self.project_path.joinpath('_'.join([self.project_name ,'MODL_DB.csv'])))

    # Populated by load_model(); _wdms is cached across model reloads.
    self.model = None
    self._wdms = None
    self.uci = None
|
93
|
+
## Input/Output methods
|
|
94
|
+
def initialize(self,reach_ids,default = 4):
    """Configure BINARY-INFO print levels and run the model once.

    Sets all RCHRES binary-output print levels to *default*, then lowers the
    listed *reach_ids* to level 2, writes the UCI in place, and executes it
    with the bundled WinHspfLt.exe.

    NOTE(review): the WinHspfLt path is Windows-specific and hard-coded
    relative to this package — confirm deployment layout.
    """
    self.uci.update_table(default,'RCHRES','BINARY-INFO',0,columns = ['HEATPR','HYDRPR','SEDPR','OXRXPR','NUTRPR','PLNKPR'],operator = 'set')
    self.uci.update_table(2,'RCHRES','BINARY-INFO',0,columns = ['HEATPR','HYDRPR','SEDPR','OXRXPR','NUTRPR','PLNKPR'],opnids = reach_ids,operator = 'set')

    # Overwrites the loaded model's UCI file before running.
    self.uci.write(self.model.uci_file)
    winHSPF = str(Path(__file__).resolve().parent.parent) + '\\bin\\WinHSPFLt\\WinHspfLt.exe'
    subprocess.run([winHSPF,self.model.uci_file]) #, stdout=subprocess.PIPE, creationflags=0x08000000)
|
+
|
|
103
|
+
def set_dates(self, start_date = '1996-01-01',end_date ='2100-01-01'):
    """Set the inclusive date window used when slicing model output."""
    self.start_date, self.end_date = start_date, end_date
|
+
|
|
107
|
+
|
|
108
|
+
def load_model(self,name):
    """Load a UCI model by run number or explicit file name.

    Parameters
    ----------
    name : int or str
        An int is treated as a run number and expanded to
        '<project>_<n>.uci' (and recorded on ``self.run``); any other value
        is used verbatim as the UCI base name (``self.run`` is cleared).

    Side effects: sets ``self.model``, attaches the cached WDM interface,
    and stores a deep-copied, mutable UCI on ``self.uci``.
    """
    if isinstance(name,int): # Default approach
        self.run = name
        name = '_'.join([self.project_name , str(name)])
    else:
        name = str(name)
        self.run = None

    self.model = hspfModel(self.model_path.joinpath(name + '.uci'))

    # WDM data never changes so I am trying to avoid reloading it each time the model is updated.
    # Very brittle solution. TODO: think harder about how to handle this.
    if self._wdms is None:
        try:
            self._wdms = wdmInterface(self.model.wdm_paths)
        except Exception:
            # Best-effort: leave _wdms as None when WDM files cannot be
            # opened. (BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass

    self.model.wdms = self._wdms
    self.model.reports.wdms = self._wdms
    self.uci = deepcopy(self.model.uci) #uci to be manipulated
130
|
+
|
|
131
|
+
|
|
132
|
+
# def setup_run(self, reach_ids = None, time_Step = 3,n = 1):
|
|
133
|
+
# setup_utils.setup(self.uci,self.project_name,run = self.run,reach_ids = reach_ids,n = 1,time_step = 3)
|
|
134
|
+
|
|
135
|
+
def run_model(self,name = None,overwrite_hbn = False): # NO STATE CHANGE
    """Write the working UCI under a run name and execute it with WinHspfLt.

    Parameters
    ----------
    name : None, int or str
        None -> next run number ('<project>_<run+1>'); int -> that run
        number; str -> used verbatim as the UCI base name.
    overwrite_hbn : bool
        When False, repoint the binary-output (HBN) files to the new run
        name first so previous runs' HBN output is preserved.

    NOTE(review): with name=None this raises TypeError if ``self.run`` is
    None (no integer run loaded) — confirm callers always load by number first.
    """
    if name is None:
        name = '_'.join([self.project_name, str(self.run+1)])
    elif isinstance(name,int): # Default approach
        name = '_'.join([self.project_name , str(name)])
    else:
        name = str(name)

    if not overwrite_hbn:
        self.uci.update_bino(name)

    uci_file = self.model_path.joinpath(name + '.uci').as_posix()
    self.uci.write(uci_file)
    # Windows-specific executable bundled with the package.
    winHSPF = str(Path(__file__).resolve().parent.parent) + '\\bin\\WinHSPFLt\\WinHspfLt.exe'
    subprocess.run([winHSPF,uci_file]) #, stdout=subprocess.PIPE, creationflags=0x08000000)
|
+
|
|
152
|
+
|
|
153
|
+
def get_simulated_output(self,reach_ids,constituent,time_step = 'YE'):
    """Pull a simulated reach series from the loaded HBN output, named
    'simulated' for downstream joins."""
    series = self.model.hbns.get_reach_constituent(constituent,reach_ids,time_step)
    series.name = 'simulated'
    return series
|
|
157
|
+
|
|
158
|
+
def get_observed_data(self,station_ids,constituent,time_step = 'YE'):
    """Fetch observed data for the stations, aggregated to *time_step* and
    named 'observed' for downstream joins."""
    series = self.dm._get_data(station_ids,constituent,agg_period = time_step).sort_index(level = 'index')
    series.name = 'observed'
    return series
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
def compare_simulated_observed(self,station_ids,reach_ids,constituent,time_step,flow_station_ids = None, dropna = False):
    """Pair simulated and observed series for a constituent, plus matching flow.

    Returns a DataFrame with columns ['simulated', 'observed',
    'simulated_flow', 'observed_flow'], trimmed to the observed period of
    record, with the request parameters attached via ``df.attrs``.
    """
    obs = self.get_observed_data(station_ids,constituent,time_step)
    sim = self.get_simulated_output(reach_ids,constituent,time_step = time_step)

    # Join observed and simulated, then trim to the observed period of record.
    df = sim.join(obs,how = 'outer')
    df = df.loc[(df.index >= obs.index.min()) & (df.index <= obs.index.max())]
    if dropna: df = df.dropna()
    df.columns = ['simulated','observed']

    # Default the flow stations to the constituent stations.
    if flow_station_ids is None: flow_station_ids = station_ids

    # Matching flow data: simulated flow must exist (inner join), while
    # observed flow is optional (left join) so gaps do not drop rows.
    sim_flow = self.get_simulated_output(reach_ids,'Q',time_step)
    sim_flow.name = 'simulated_flow'
    df = df.join(sim_flow,how = 'inner')
    obs_flow = self.get_observed_data(flow_station_ids,'Q',time_step)
    obs_flow.name = 'observed_flow'
    df = df.join(obs_flow,how='left')
    df.columns = ['simulated','observed','simulated_flow','observed_flow']

    # Attach request metadata for downstream reporting/figures.
    df.attrs['station_ids'] = station_ids
    df.attrs['reach_ids'] = reach_ids
    df.attrs['constituent'] = constituent
    df.attrs['unit'] = obs.attrs['unit']
    df.attrs['time_step'] = time_step
    df.attrs['flow_station_ids'] = flow_station_ids
    return df
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
def compare_wplmn(self,station_ids,reach_ids,constituent,unit,flow_station_ids = None,sample=True):
    """Pair daily WPLMN observations with simulated reach output.

    Like compare_simulated_observed() but sources observations from the
    WPLMN dataset (optionally samples only) at a fixed daily time step.
    Returns a DataFrame with ['simulated', 'observed', 'simulated_flow',
    'observed_flow'] columns and request metadata in ``df.attrs``.
    """
    obs = pd.concat([self.dm.get_wplmn_data(station_id,constituent,unit,'D',samples_only=sample) for station_id in station_ids])
    #sim = self.get_simulated_output(reach_ids,constituent,time_step = 'D',unit = unit)

    if (constituent == 'TSS') & (unit == 'lb'): #convert TSS from lbs to us tons
        obs.loc[:,'value'] = obs.loc[:,'value']/2000

    sim = self.model.hbns.get_rchres_data(constituent,reach_ids, unit,'D')

    df = sim.join(obs,how = 'outer')
    df.columns = ['simulated','observed']

    # Simulated flow must exist (inner join); observed flow is optional.
    sim_flow = self.get_simulated_output(reach_ids,'Q','D')
    sim_flow.name = 'simulated_flow'
    df = df.join(sim_flow,how = 'inner')
    if flow_station_ids is None:
        # If wplmn station has flow data use it otherwise use the specified wiski station ids
        obs_flow = pd.concat([self.dm.get_wplmn_data(station_id,'Q',unit,'D',samples_only=sample) for station_id in station_ids])
    else:
        obs_flow = self.get_observed_data(flow_station_ids,'Q','D')

    obs_flow.name = 'observed_flow'
    df = df.join(obs_flow,how='left')
    df.columns = ['simulated','observed','simulated_flow','observed_flow']

    # Attach request metadata for downstream reporting/figures.
    df.attrs['station_ids'] = station_ids
    df.attrs['reach_ids'] = reach_ids
    df.attrs['constituent'] = constituent
    df.attrs['unit'] = obs.attrs['unit']
    df.attrs['time_step'] = 'D'
    # NOTE(review): records station_ids even when an explicit
    # flow_station_ids was passed — looks like it should record the value
    # actually used; confirm before relying on this attr.
    df.attrs['flow_station_ids'] = station_ids
    return df
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
# Objective Functions sort of
|
|
247
|
+
|
|
248
|
+
def aggregate(self,station_ids,reach_ids,constituent,units,time_step,agg_func = 'mean'):
    """Aggregate paired simulated/observed values into a 12-month summary.

    Groups the comparison DataFrame by calendar month, applies *agg_func*,
    and returns a DataFrame indexed 1-12 (plus a 'Mean' row) with columns
    ['simulated', 'observed', 'ratio'] where ratio = observed/simulated.
    Months with no data remain NaN.

    NOTE: *units* is currently unused (kept for interface compatibility);
    the grouping period is fixed to monthly ('M').
    """
    # BUG FIX: the original call passed `units` into
    # compare_simulated_observed's `time_step` slot and `time_step` into
    # `flow_station_ids` — that method takes no units argument.
    df = self.compare_simulated_observed(station_ids, reach_ids, constituent, time_step)

    # Grouping period is hard-coded; the other branches are currently
    # unreachable (and df.index.week is deprecated in modern pandas).
    period = 'M'

    if period == 'M':
        grouper = df.index.month
    if period == 'Y':
        grouper = df.index.year
    if period == 'W':
        grouper = df.index.week
    if period == 'D':
        grouper = df.index.dayofyear

    # Pre-size a 12-month frame so months missing from the data stay NaN.
    df_agg = pd.DataFrame(np.ones((12,3))*np.nan,index = range(1,13),columns = ['simulated','observed','ratio'])
    df_agg.index.name = 'month'
    df = df.groupby(grouper).agg(agg_func)[['simulated','observed']]
    df.columns = ['simulated','observed']
    df['ratio'] = df['observed']/df['simulated']
    df_agg.loc[df.index,df.columns] = df.values

    # Summary row: mean of the monthly values, with its ratio recomputed
    # from the mean simulated/observed rather than averaged ratios.
    df_agg.loc['Mean'] = df_agg.agg('mean')
    df_agg['ratio'] = df_agg['observed']/df_agg['simulated']

    return df_agg
|
|
273
|
+
|
|
274
|
+
def landcover(self,constituent):
    """Compute per-PERLND multiplicative targets from land-cover loading rates.

    Normalizes each PERLND's mean simulated loading by its group's dominant
    land cover, compares against the ratios in ``self.targets``, and returns
    the normalize_mean() frame extended with 'target' (multiplier to reach
    the target ratio) and 'uci_name' columns.
    """
    perlnd_names = self.model.uci.table('PERLND','GEN-INFO')['LSID']
    df = self.model.hbns.get_perlnd_data(constituent)*2000 #tons/ac/yr to lbs/acr/year
    # Restrict to the configured analysis window.
    df = df[(df.index >= self.start_date) & (df.index <= self.end_date)]

    # if self.model.uci.LSID_flag == 1:
    #     print('LSID Error')
    #     return

    perland_dict = self.model.uci.opnid_dict['PERLND']

    # Dominant land cover number; assumes exactly one row has dom_lc == 1.
    # NOTE(review): int() on a one-element Series is deprecated in modern
    # pandas — confirm targets.csv always has a single dom_lc row.
    dom_lc = int( self.targets['lc_number'][ self.targets['dom_lc'] == 1] )

    df_mean_norm = normalize_mean(df,perland_dict,dom_lc)
    # Zero loadings cannot be scaled; mark NaN so they resolve to target 1.
    df_mean_norm.loc[df_mean_norm.mean_norm ==0,'mean_norm'] = np.nan
    targets = self.targets.set_index('lc_number')[constituent]
    # Express targets relative to the dominant land cover, same as mean_norm.
    targets = targets/targets.loc[dom_lc]

    df_mean_norm['target'] = targets.loc[df_mean_norm['landcover']].values/df_mean_norm['mean_norm']
    df_mean_norm = df_mean_norm.replace(np.nan,1) #don't make any changes to 0 concentration perlands
    df_mean_norm['uci_name'] = perlnd_names.loc[df_mean_norm.index]
    return df_mean_norm
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
def scour(self,target):
    """Compute per-reach erosivity multipliers from the scour report.

    Parameters
    ----------
    target : float
        Desired depscour fraction, strictly between 0 and 1.

    Returns a Series of multiplicative adjustments: 1 (no change) when a
    reach's deposition/scour is within 5% of the target, 0.95/1.05 nudges
    otherwise, 2 for positive depscour, and 1 for lake reaches or NaNs.
    """
    # Erosivity adjustment only
    assert((target > 0) & (target < 1))
    scour = self.model.reports.scour_report()
    #TODO: add check for this
    # Assume all nonpoint values are greater than 0...
    # if depscour is greater than 0
    target = scour['nonpoint']*(1-target)/target # Assuming nonpoint load is set
    adjustment = np.abs(scour['depscour'])/target
    adjustment[(adjustment < 1.05) & (adjustment > .95)] = 1 # Don't change reaches where the depscour is close to the target
    adjustment[adjustment > 1.05] = .95 # Since depscour is negative we have to swap this. I think if I do target/depscour this line would be less confusing
    adjustment[adjustment < .95] = 1.05
    adjustment[scour['depscour'] > 0] = 2 # Double any values where the depscour is positive
    adjustment[scour['LKFG'] == 1] = 1 # Ignore lake flags
    adjustment[np.isnan(adjustment)] = 1

    return adjustment
|
|
316
|
+
|
|
317
|
+
|
|
318
|
+
# Methods for updating table values
|
|
319
|
+
def update_table(self,value,operation,table_name,table_id,opnids = None,columns = None,operator = '*',axis = 0):
    """Thin pass-through to the working UCI copy's update_table()."""
    self.uci.update_table(value,operation,table_name,table_id,opnids,columns,operator,axis)
321
|
+
|
|
322
|
+
def get_adjustments(self,station_ids,reach_ids, constituent,method):
    """Compute a multiplicative adjustment series for a calibration method.

    Methods: 'landcover' (per-opnid targets, axis 0), 'load' (monthly
    observed/simulated load ratios, axis 1), 'conc' (monthly concentration
    ratios, axis 1).

    Returns (adjustment, axis) where axis tells update_table() whether the
    series maps to rows (opnids) or columns (months).

    Raises
    ------
    ValueError
        For an unknown *method*. (BUG FIX: the original printed a message
        and then hit `return adjustment,axis` with both names undefined,
        raising an opaque NameError.)
    """
    if method == 'landcover':
        adjustment = self.landcover(constituent)['target'] #pandas series #by opnid
        axis = 0
    elif method =='load': #monthly
        adjustment = self.aggregate(station_ids,reach_ids, constituent,'lb','D',agg_func = 'sum')['ratio'].iloc[0:12] #pandas series by column
        axis = 1
    elif method == 'conc':
        adjustment = self.aggregate(station_ids,reach_ids, constituent,'mg/l','D')['ratio'].iloc[0:12] #pandas series by column
        axis = 1
    else:
        raise ValueError(f"Not a valid method: {method!r}; expected 'landcover', 'load' or 'conc'")
    return adjustment,axis #ouput dataframe with index representing the opnid
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
def update_kser(self,station_ids,reach_ids, constituent,method,opnids = None):
    """Scale PERLND KSER (sediment washoff coefficient) by the computed
    adjustments for *method* ('load' or 'landcover')."""
    #TODO account for the additional comment column
    assert method in ['load','landcover']

    adjustment,axis = self.get_adjustments(station_ids,reach_ids, constituent,method)
    # For landcover adjustments, restrict to opnids present in SED-PARM3.
    if (method == 'landcover') & (opnids is None):
        opnids = adjustment.index.intersection(self.uci.table('PERLND','SED-PARM3',0).index)

    self.update_table(adjustment,'PERLND','SED-PARM3', 0,opnids,['KSER'], '*',axis)
|
+
|
|
349
|
+
|
|
350
|
+
def update_qualprop(self,station_ids,reach_ids, constituent,method,table_name, opnids = None, months = [1,2,3,4,5,6,7,8,9,10,11,12],threshold = 0, max_change = 1000, update_alg = '*'):
    """Adjust monthly interflow/groundwater concentration tables.

    Applies method-derived adjustments ('conc', 'load', or 'landcover') to
    the PERLND MON-IFLW-CONC / MON-GRND-CONC table for *constituent*,
    screening out changes below *threshold* percent and capping changes at
    *max_change* percent.

    NOTE(review): *months* is a mutable default argument — safe only
    because it is never mutated in place here (reassigned to np.array).
    """
    assert method in ['conc','load','landcover']
    assert table_name in ['MON-IFLW-CONC','MON-GRND-CONC']
    assert max_change >= threshold

    # Percent -> fraction.
    threshold = threshold/100
    max_change = max_change/100

    # Sub-table index for the constituent within the monthly-conc block.
    table_id = {'N':1,
                'TKN':0,
                'OP':2,
                'TP':3}[constituent]
    #nutrient_id-1 # Shift as tables in the uci dictionary are stored starting at the 0 index

    adjustment,axis = self.get_adjustments(station_ids,reach_ids, constituent,method)
    if opnids is None:
        opnids = adjustment.index.intersection(self.uci.table('PERLND',table_name,table_id).index)

    # 'load' adjustments are monthly -> select requested months;
    # 'landcover' adjustments are per-opnid -> select requested opnids.
    if method == 'load':
        adjustment = adjustment.loc[months]

    if method == 'landcover':
        adjustment = adjustment.loc[opnids]

    # Apply threshold adjustment
    adjustment[np.isnan(adjustment)] = 1
    adjustment[np.abs((adjustment-1)) <= threshold] = 1 # don't change values below threshold
    direction = np.sign(adjustment - 1)[np.abs(adjustment - 1) > max_change]
    adjustment[np.abs(adjustment-1) > max_change] = 1+ direction*max_change # Formla assumes it is flux/model
    #Note that in uci.update_table() there is further screening to account for adjustments below the model precision

    #column_prefix = self.uci.get_table_info('PERLND',table_name)['NAME'].str[:3].iloc[1]
    # Build the monthly column names (e.g. prefix + month abbreviation).
    months = np.array(months)
    column_prefix = self.uci.table('PERLND',table_name,table_id).columns.str[:3][0]
    columns = [column_prefix+helpers.get_months(month) for month in months]

    self.uci.update_table(adjustment.values,'PERLND',table_name,table_id,opnids,columns,update_alg,axis)
390
|
+
|
|
391
|
+
def update_erosivity(self, opnids = None):
    """Scale reach silt/clay erosivity (M) by scour-derived adjustments.

    NOTE(review): this looks broken as written — get_adjustments() takes
    (station_ids, reach_ids, constituent, method) and has no 'scour'
    method, so this call raises a TypeError. The intended source is
    presumably self.scour(target); confirm before use.
    """
    adjustment,axis = self.get_adjustments(0,'scour')

    table = self.uci.table('RCHRES','SILT-CLAY-PM',0)

    if opnids is None:
        opnids = adjustment.index.intersection(table.index)

    # NOTE(review): columns passed as 'M' (a string) here, but as a list
    # (['KSER']) elsewhere — confirm update_table accepts both forms.
    self.update_table(adjustment,'RCHRES','SILT-CLAY-PM', 0,opnids,'M','*',axis)
    self.update_table(adjustment,'RCHRES','SILT-CLAY-PM', 1,opnids,'M','*',axis)
401
|
+
|
|
402
|
+
|
|
403
|
+
def save_output(self, constituent,station_ids,reach_ids,time_step = 'D',flow_station_ids=None,drng_area = None,save_path = None,start_year = 1996, end_year = 2100):
    """Write calibration summary tables and figures for one constituent.

    For 'Q' (flow) this emits hydrology stats, water-budget CSVs, and flow
    figures; for water-quality constituents it emits stats plus scatter,
    exceedance, timeseries, and (data permitting) rating/LDC figures.
    Files go under *save_path* (defaults to the project output folder).
    """
    if save_path is None:
        save_path = self.output_path
    if drng_area is None:
        drng_area = self.uci.network.drainage_area(reach_ids)
        #drng_area = sum([self.uci.network.drainage_area(reach_id) for reach_id in reach_ids])

    # Run number is embedded in output file names to keep runs separate.
    run_number = self.run

    if flow_station_ids is None:
        flow_station_ids = station_ids

    if constituent == 'Q':
        constituent = 'Q'
        units = 'cfs'
        df = self.compare_simulated_observed(flow_station_ids,reach_ids,constituent,
                                             time_step = time_step)

        #TODO: Move the exporting of these tables into the report module to decouple things
        metrics.hydro_stats(df.dropna(),drng_area).to_csv(save_path.joinpath(f'hydrostats_{station_ids}'))
        self.model.reports.annual_water_budget('PERLND').to_csv(save_path.joinpath(f'{station_ids}_annual_perlnd_water_budget.csv'))
        self.model.reports.annual_water_budget('IMPLND').to_csv(save_path.joinpath(f'{station_ids}_annual_implnd_water_budget.csv'))
        self.model.reports.annual_water_budget('RCHRES').to_csv(save_path.joinpath(f'{station_ids}_annual_perlnd_rchres_budget.csv'))
        self.model.reports.ann_avg_watershed_loading(constituent,reach_ids).to_csv(save_path.joinpath('annual_runoff.csv'))
        #self.model.reports.monthly_runoff().to_csv(save_path.joinpath('monthly_runoff.csv'))

        #self.model.reports.avg_ann_yield('Q',reach_ids).to_csv(save_path.joinpath(f'{station_ids}_avg_ann_outflows.csv'))
        #self.model.reports.avg_monthly_outflow().to_csv(save_path.joinpath(f'{station_ids}_avg_monthly_outflows.csv'))

        figures.contTimeseries(df,station_ids,constituent,units,save_path)
        figures.FDCexceed(df.dropna(),station_ids,constituent,units,save_path)
        figures.scatter(df.dropna(),station_ids,constituent,units,save_path)
        figures.monthly_bar(metrics.monthly(df.dropna(),units),station_ids,constituent,units,save_path)

    else:
        assert constituent in ['TSS','TP','OP','N','TKN']
        df= self.compare_simulated_observed(
            station_ids,
            reach_ids,
            constituent,
            time_step,
            flow_station_ids = flow_station_ids)

        if df.empty:
            print(f'No Observation Data found for {constituent} at {station_ids}')
            return
        reachid_str = '_'.join([str(reach_id) for reach_id in reach_ids])

        # Full-period timeseries plot before trimming to start/end years.
        figures.timeseries(df,station_ids,constituent,'mg/l',save_path = save_path.joinpath(f'{constituent}_Timeseries_{reachid_str}_{run_number}.png'))

        df =df.loc[(df.index.year >= start_year) & (df.index.year <= end_year)]

        stats = metrics.stats(df.dropna(subset=['observed']),'mg/l')[['observed','simulated','per_error','abs_error']]
        stats.to_csv(save_path.joinpath(f'{constituent}_stats_{reachid_str}_{run_number}.csv'))

        figures.scatter(df.dropna(subset=['observed']),station_ids,constituent,'mg/l',save_path.joinpath(f'{constituent}_Scatter_{reachid_str}_{run_number}.png'))
        figures.FDCexceed(df.dropna(subset=['observed']),station_ids,constituent,'mg/l',save_path.joinpath(f'{constituent}_Exceedence_{reachid_str}_{run_number}.png'))
        # NOTE(review): this re-renders the timeseries to the same file name
        # as the pre-trim plot above, overwriting it — confirm intentional.
        figures.timeseries(df,station_ids,constituent,'mg/l',save_path.joinpath(f'{constituent}_Timeseries_{reachid_str}_{run_number}.png'))

        # Rating/LDC figures need paired obs + obs flow; skip thin datasets.
        if len(df.dropna(subset = ['observed','observed_flow'])) > 10:
            figures.rating(df.dropna(subset = ['observed','observed_flow']),station_ids,constituent,'mg/l',save_path.joinpath(f'{constituent}_Rating_{reachid_str}_{run_number}.png'))
            figures.LDC(df.dropna(subset = ['observed','observed_flow']),station_ids,constituent,'mg/l',time_step = time_step, save_path = save_path.joinpath(f'{constituent}_LDC_{reachid_str}_{run_number}.png'))
|
+
|
|
470
|
+
|
|
471
|
+
|
|
472
|
+
|
|
473
|
+
def chuck(adjustment,table):
    """Redistribute monthly adjustments onto the smaller/larger of each
    adjacent month pair of *table*.

    For each monthly factor in adjustment[0]: factors > 1 scale the minimum
    of columns (i, i+1); factors < 1 scale the maximum. Contributions are
    accumulated and averaged where a cell is touched more than once;
    untouched cells keep their original value.

    NOTE(review): mutates the caller's *table* — a 'dummy' copy of the
    first column is appended (to let December wrap) and never removed from
    the input frame itself; confirm callers tolerate this side effect.
    """
    # If increasing monthly concentration increase the minimum concnetration value of Mi and Mi+1
    # If decreasing monthly concentration decrease the maximum concnetration value of Mi and Mi+1
    # If concnetration values are equal increase both equally
    table['dummy'] = table.iloc[:,0]
    zero_table = table.copy()*0
    count_table = zero_table.copy()
    for index, value in enumerate(adjustment[0]):
        next_index = index+1
        if value > 1:
            for row,(a,b) in enumerate(zip(table.iloc[:,index].values, table.iloc[:,next_index].values)):
                # nanargmin picks which of the pair receives the scaled value.
                zero_table.iloc[row,index+np.nanargmin([a,b])] += np.nanmin([a,b])*value
                count_table.iloc[row,index+np.nanargmin([a,b])] += 1
        elif value < 1:
            for row,(a,b) in enumerate(zip(table.iloc[:,index].values, table.iloc[:,next_index].values)):
                zero_table.iloc[row,index+np.nanargmax([a,b])] += np.nanmax([a,b])*value
                count_table.iloc[row,index+np.nanargmax([a,b])] += 1

    zero_table.drop('dummy',axis=1,inplace=True)
    count_table.drop('dummy',axis=1,inplace=True)

    # Untouched cells keep original values; touched cells are averaged.
    zero_table[count_table == 0] = table[count_table==0]
    count_table[count_table == 0] = 1
    zero_table = zero_table/count_table
    return zero_table
|
|
499
|
+
|
|
500
|
+
def normalize_mean(df,perland_dict,dom_lc):
    """Normalize each PERLND's mean loading by its group's dominant land cover.

    Parameters
    ----------
    df : DataFrame
        Per-PERLND time series; columns are PERLND opnids.
    perland_dict : DataFrame
        PERLND metadata indexed by opnid with 'metzone' and 'landcover'
        columns. Mutated: a 'landcover_group' column is added.
    dom_lc : int
        Dominant land-cover number used as the normalization denominator.

    Returns a frame with ['mean', 'mean_norm', 'metzones', 'landcover',
    'perlands'] sorted by metzone then opnid.
    """
    df_mean = df.mean().rename('mean').to_frame()
    df_mean['mean_norm'] = df_mean['mean'].values

    perlands = df_mean.index
    # Group boundary: each re-occurrence of the first landcover value starts
    # a new group (assumes PERLNDs are listed in repeating landcover order).
    perland_dict['landcover_group'] = (perland_dict['landcover'] == perland_dict['landcover'].iloc[0]).cumsum()

    metzones = np.array([perland_dict.loc[perland,'metzone'] for perland in perlands])
    landcover = np.array([perland_dict.loc[perland,'landcover'] for perland in perlands])
    landcover_groups = np.array([perland_dict.loc[perland,'landcover_group'] for perland in perlands])

    # Divide each group's means by that group's dominant-landcover mean.
    for group in np.unique(landcover_groups):
        dom_perlnd = perland_dict.index[(perland_dict['landcover_group'] == group) & (perland_dict['landcover'] == dom_lc)]
        df_mean.loc[landcover_groups == group,'mean_norm'] = df_mean.loc[landcover_groups == group,'mean_norm']/df_mean.loc[dom_perlnd,'mean_norm'].values

    df_mean['metzones'] = metzones
    df_mean['landcover'] = landcover
    df_mean['perlands'] = perlands
    df_mean.sort_values(by = ['metzones','perlands'],inplace = True)
    return df_mean
|
|
520
|
+
|
|
521
|
+
|
|
522
|
+
# def column_prefix():
|
|
523
|
+
# months = np.array(months)
|
|
524
|
+
# column_prefix = self.uci.table('PERLND',table_name,table_id).columns.str[:3][0]
|
|
525
|
+
# columns = [column_prefix+ch.get_months(month) for month in months]
|
|
526
|
+
|
|
527
|
+
def threshold(adjustment,threshold,max_change):
    """Screen a multiplicative adjustment series in place.

    NaNs become 1 (no change); deviations from 1 within *threshold* are
    dropped (set to 1); deviations beyond *max_change* are capped at
    1 +/- max_change, keeping their sign. Mutates and returns *adjustment*.
    """
    # Neutralize missing entries first so the comparisons below are defined.
    adjustment[np.isnan(adjustment)] = 1
    # Deviations too small to matter are zeroed out entirely.
    adjustment[np.abs(adjustment - 1) <= threshold] = 1
    # Cap the remaining deviations, preserving their direction.
    capped = np.abs(adjustment - 1) > max_change
    adjustment[capped] = 1 + np.sign(adjustment - 1)[capped]*max_change
    # Further screening for sub-precision adjustments happens in uci.update_table().
    return adjustment
|
|
535
|
+
|
|
536
|
+
|
|
537
|
+
#class hydrologyCalibrator(calibrator):
|
|
538
|
+
|
|
539
|
+
#class nutrientCalibrator(calibrator):
|
|
540
|
+
|
|
541
|
+
class sedimentCalibrator(calibrator):
    # Sediment-specific calibration workflow built on the base calibrator.

    def update_kser(self,method,opnid = None):
        """Recompute PERLND KSER adjustments for a sediment method.

        NOTE(review): several pieces look unfinished — ``self.compare`` and
        ``self.sftl`` are not defined anywhere in this file, and
        ``replace_table`` is called without passing the computed
        *adjustment* (or the un-commented *table*), so the adjustment is
        never applied. Confirm against the intended pyhspf API before use.
        """
        #TODO account for the additional comment column
        assert method in ['load','landcover','sftl']

        table = self.uci.table('PERLND','SED-PARM3',0,False)

        if method == 'load':
            adjustment = self.compare(0,aggregate = True).loc['Mean']['ratio']
        elif method == 'landcover':
            adjustment = self.landcover(0)['target']
            table = self.uci.table('PERLND','SED-PARM3',0)
            # NOTE(review): `opnid == None` should be `is None` (comparison
            # against a pandas index would not be a plain bool).
            if opnid == None:
                opnid = table.index
            # Column vector shape for row-wise application.
            adjustment = np.array(adjustment.loc[opnid])[:,None]
        elif method == 'sftl':
            adjustment = self.sftl()

        self.uci.replace_table('PERLND','SED-PARM3',0)
562
|
+
|
|
563
|
+
def update_erosivity(self,param = 'M',opnid = None,update_alg = '*'):
    """Apply scour-derived adjustments to both SILT-CLAY-PM sub-tables.

    NOTE(review): ``self.scour()`` resolves to the ``scour(hbn,uci)``
    function defined later in this class, which would receive ``self`` as
    *hbn* and no *uci* — a TypeError as written. Also the keyword names
    here (``opnid``, ``update_alg``) differ from the ``opnids`` /
    ``operator`` keywords used by uci.update_table elsewhere in this file;
    confirm against the pyhspf API.
    """
    adjustment = self.scour()
    table = self.uci.table('RCHRES','SILT-CLAY-PM',0)
    if opnid == None:
        opnid = table.index
    # Column vector shape for row-wise application.
    adjustment = np.array(adjustment.loc[opnid])[:,None]
    self.uci.update_table(adjustment,'RCHRES','SILT-CLAY-PM',table_id = 0,opnid = opnid,columns = [param],update_alg = update_alg)

    # Second sub-table (e.g. clay) gets a freshly computed adjustment.
    adjustment = self.scour()
    adjustment = np.array(adjustment.loc[opnid])[:,None]
    self.uci.update_table(adjustment,'RCHRES','SILT-CLAY-PM',table_id = 1,opnid = opnid,columns = [param],update_alg = update_alg)
|
+
|
|
575
|
+
|
|
576
|
+
def fit_param(self,param,m_factor,N = 2,opnid = None,run = None):
    """Linearly extrapolate a scour parameter toward the depscour target.

    Fits depscour vs. *param* over the last *N* runs per reach and sets the
    parameter to the value predicted at -0.25 x nonpoint load, clamped to
    the hard bounds table. The second sub-table gets the value scaled by
    *m_factor*.

    NOTE(review): ``self.load_data`` is not defined in this file — this
    method presumably depends on a subclass or an earlier revision; the
    ``opnid=``/``columns=`` keywords also do not match update_table's
    signature above. Confirm before use.
    """
    bounds = {'M':[.000000001,.01,2,5], #maxlow,low,high,maxhigh
              'TAUCD':[.001,.01,.3,1],
              'TAUCS':[.01,.05,.5,3]}
    if run == None:
        run = self.run

    # Pull the scour history and keep only the last N runs.
    data = self.load_data('scour',N=10000)
    data = data.loc[:,range(run-N+1,run+1),:]

    if opnid == None:
        opnid = data.reset_index(level=[1]).index.unique() # assumes multiindex

    for index in opnid:
        # Skip lake reaches (LKFG == 1 everywhere).
        if any(data.loc[index]['LKFG'] == 0):
            x = data.loc[index]['depscour']
            y = data.loc[index][param]
            # First-order fit: parameter value as a function of depscour.
            linear_model=np.polyfit(x,y,1)
            linear_model_fn=np.poly1d(linear_model)
            # Predict the parameter at the target depscour (-25% of nonpoint).
            m = linear_model_fn(-data.loc[index]['nonpoint'].iloc[1]*.25)
            # Clamp to the hard parameter bounds.
            if m < bounds[param][0]:
                m = bounds[param][0]
            if m > bounds[param][3]:
                m = bounds[param][3]
            self.update_table('RCHRES','SILT-CLAY-PM',0,m,'set',opnid = index,columns = [param]) #mod.update_table(operation,table_name,table_id,adjustment,operator,opnids,columns)
            self.update_table('RCHRES','SILT-CLAY-PM',1,m*m_factor,'set',opnid = index,columns = [param]) #mod.update_table(operation,table_name,table_id,adjustment,operator,opnids,columns)
602
|
+
|
|
603
|
+
def erosivity(self,m_factor,param = 'M',opnid = None,run = None,iterations = 1):
    """Iteratively nudge erosivity, rerun the model, then fit the parameter.

    Each iteration applies update_erosivity(), executes the model (creating
    run+1), reloads it, and saves its data; after the loop fit_param()
    extrapolates the parameter from the accumulated runs and one final run
    is made.

    NOTE(review): ``self.save_data`` is not defined in this file — this
    method appears to depend on a subclass or an earlier revision of the
    calibrator; confirm before use.
    """
    if run == None:
        run = self.run

    # run model updating erosivity for N iterations
    for iteration in range(iterations):
        self.update_erosivity(param = param,opnid = opnid)
        self.run_model() # creates the run+1 uci file and runs it using WinHspfLT
        run = run + 1
        self.load_model(run)
        self.save_data()

    # Final fit over the iteration history, then one confirmation run.
    self.fit_param(param,m_factor,iterations+1,opnid,run)
    self.run_model() # creates the run+1 uci file and runs it using WinHspfLT

    run = run + 1
    self.load_model(run)
    self.save_data()
623
|
+
|
|
624
|
+
def scour(hbn,uci):
    """Compute per-reach erosivity multipliers targeting depscour at 25% of
    nonpoint load.

    NOTE(review): despite living in the class body this is written as a
    free function — no ``self`` parameter — and references an undefined
    name ``reports``; it shadows calibrator.scour(target) with a different
    signature. Looks like dead/incomplete code; confirm before calling.
    """
    # Erosivity adjustment only
    scour = reports.scour_report(hbn,uci)
    #TODO: add check for this
    # Assume all nonpoint values are greater than 0...
    # if depscour is greater than 0
    target = scour['nonpoint']*.25 # Assuming nonpoint load is set
    adjustment = np.abs(scour['depscour'])/target
    adjustment[(adjustment < 1.05) & (adjustment > .95)] = 1 # Don't change reaches where the depscour is close to the target
    adjustment[adjustment > 1.05] = .95 # Since depscour is negative we have to swap this. I think if I do target/depscour this line would be less confusing
    adjustment[adjustment < .95] = 1.05
    adjustment[scour['depscour'] > 0] = 2 # Double any values where the depscour is positive
    adjustment[scour['LKFG'] == 1] = 1 # Ignore lake flags
    adjustment[np.isnan(adjustment)] = 1

    return adjustment
|
|
640
|
+
|
|
641
|
+
|
|
642
|
+
|