hspf 2.0.3-py3-none-any.whl → 2.1.1-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
hspf/reports.py CHANGED
@@ -6,7 +6,7 @@ Created on Mon Apr 11 08:26:04 2022
  """
  import numpy as np
  import pandas as pd
- from . import helpers
+ from hspf import helpers
  from pathlib import Path

  #timeseries_catalog = pd.read_csv(Path(__file__).parent/'TIMESERIES_CATALOG.csv')
@@ -37,22 +37,31 @@ class Reports():
  else:
  return annual_reach_water_budget(self.uci,self.hbns)

- # def annual_runoff(self):
- # #assert operation in ['PERLND','IMPLND']
- # #if operation == 'PERLND':
- # return annual_perlnd_runoff(self.uci,self.hbns)
- # #else:
- # # raise NotImplementedError()
+ def watershed_loading(self,constituent,reach_ids,upstream_reach_ids = None,by_landcover = False):
+ '''
+ Calculate the loading to channels from a watershed.

- # def monthly_runoff(self,landcover=None):
- # df = monthly_perlnd_runoff(self.uci,self.hbns).unstack().T
- # if landcover is None:
- # return df
- # else:
- # return df.loc[landcover]
-
- def annual_sediment_budget(self):
- return annual_sediment_budget(self.uci,self.hbns)
+ Parameters
+ ----------
+ constituent : str
+ Constituent to calculate loading for (e.g. 'TP', 'TSS', 'N', 'OP', 'Q', 'TKN')
+ reach_ids : list
+ List of reach IDs defining the watershed outlet
+ upstream_reach_ids : list, optional
+ List of reach IDs defining the upstream boundary of the watershed. The default is None.
+ by_landcover : bool, optional
+ If True, returns loading by landcover type. The default is False.
+ '''
+ return get_watershed_loading(self.uci,self.hbns,reach_ids,constituent,upstream_reach_ids,by_landcover)
+
+ def catchment_loading(self,constituent,by_landcover = False):
+ return get_catchment_loading(self.uci,self.hbns,constituent,by_landcover)
+
+ def contributions(self,constituent,target_reach_id):
+ return total_contributions(constituent,self.uci,self.hbns,target_reach_id)
+
+ def landcover_contributions(self,constituent,target_reach_id,landcover = None):
+ return catchment_contributions(self.uci,self.hbns,constituent,target_reach_id)

  def ann_avg_subwatershed_loading(self,constituent):
  return ann_avg_subwatershed_loading(constituent,self.uci, self.hbns)
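For context, a minimal sketch of how the new Reports methods are intended to be called. The instance name and reach IDs below are placeholders, not part of the package; only the method signatures come from the code above.

    # 'rpt' stands for a Reports instance already wired to parsed uci/hbn/wdm inputs
    watershed_tp = rpt.watershed_loading('TP', reach_ids=[101], by_landcover=True)
    catchment_tp = rpt.catchment_loading('TP')
    shares = rpt.contributions('TP', target_reach_id=101)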
@@ -68,33 +77,17 @@ class Reports():
  total['share'] = total['volume_percent']/total['area_percent']
  return total

- # def monthly_avg_subwatershed_loading(self,constituent,month):
- # return monthly_avg_subwatershed_loading(constituent,month,self.uci, self.hbns)
-
- # def monthly_avg_watershed_loading(self,constituent,reach_ids,month,by_landcover = True):
- # return monthly_avg_watershed_loading(constituent,reach_ids,month,self.uci, self.hbns,by_landcover = by_landcover)
-
-
- def ann_avg_yield(self,constituent,reach_ids):
- df= avg_ann_yield(self.uci,self.hbns,constituent,reach_ids)
+ def ann_avg_yield(self,constituent,reach_ids,upstream_reach_ids = None):
+ df= avg_ann_yield(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids)
  return df

  def annual_precip(self):
  return avg_annual_precip(self.uci,self.wdms)

- # def water_balance(self,reach_ids = None):
- # if reach_ids is None:
- # reach_ids = self.uci.network.outlets()
- # return water_balance(self.uci,self.hbns,self.wdms,reach_ids)
-
  def simulated_et(self):
  return simulated_et(self.uci,self.hbns)

- # def inflows(self):
- # return inflows(self.uci,self.wdms)
-
-
-
+

  #%% Channel Reports
  def scour(hbn,uci,start_year = '1996',end_year = '2030'):
@@ -175,37 +168,218 @@ def get_catchments(uci,reach_ids):
  return landcover



- #%% Catchment Loading (ie. Direct contributions from perlnds/implnds, no losses)
- # Q
- # Subwatershed Weighted Mean Timeseries Output
- # operation = 'PERLND'
- # ts_name = 'PERO',
- # time_code = 5

+ #%% Landscape Yields
+
+ def yield_flow(uci,hbn,constituent,reach_id):
+ hbn.get_rchres_data('Q',reach_id,'cfs','yearly')/uci.network.drainage_area(reach_id)
+
+
+ def yield_sediment(uci,hbn,constituent,reach_id):
+ hbn.get_rchres_data('TSS',reach_id,'lb','yearly').mean()*2000/uci.network.drainage_area(reach_id)
+
+ def avg_ann_yield(uci,hbn,constituent,reach_ids,upstream_reach_ids = None):
+ #reach_ids = uci.network.G.nodes
+
+ reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
+ area = uci.network.drainage_area(reach_ids,upstream_reach_ids)
+
+ if constituent == 'Q':
+ units = 'acrft'
+ else:
+ units = 'lb'
+
+ df = hbn.get_reach_constituent(constituent,reach_ids,5,unit =units).mean() # Gross
+
+ return df/area
+
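In words, avg_ann_yield reduces to the mean annual load over the selected reaches divided by the contributing drainage area. A toy illustration with made-up numbers (assuming areas in acres, as the AFACTR fields elsewhere in the file suggest):

    # illustrative only -- toy numbers, not model output
    mean_annual_load_lb = 1250.0      # mean of yearly 'TP' totals over the selected reaches
    drainage_area_acres = 5000.0      # uci.network.drainage_area(reach_ids, upstream_reach_ids)
    tp_yield = mean_annual_load_lb / drainage_area_acres   # 0.25 lb/acre/yr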
+ #%% Catchment and Watershed Loading
+
+ def landcover_areas(uci):
+ df = uci.network.operation_area('PERLND').groupby('LSID').sum()
+ df['percent'] = 100*(df['AFACTR']/df['AFACTR'].sum())
+ return df

- LOADING_MAP = {'Q' : [{'t_opn':'PERLND',
- 't_con': 'PERO',
- 't_code': 'yearly',
- 'activity': 'PWATER'}],
- 'TSS': [{'t_opn':'PERLND',
- 't_con': 'SOSED',
- 't_code': 'yearly',
- 'activity': 'SEDMNT'},
- {'t_opn':'IMPLND',
- 't_con': 'SOSED',
- 't_code': 'yearly',
- 'activity': 'SEDMNT'}]}
+ def catchment_areas(uci):
+ df = uci.network.subwatersheds().reset_index()
+ df = df.groupby('TVOLNO')['AFACTR'].sum().reset_index()
+ df.rename(columns = {'AFACTR':'catchment_area'},inplace = True)
+ return df

+ def get_constituent_loading(uci,hbn,constituent,time_step = 5):

- # def annual_average_subwatershed_loading(constituent,uci,hbn,reach_ids):
- # '''

- # For each subwatershed the annual average loading rate
+ if constituent == 'TP':
+ perlnds = total_phosphorous(uci,hbn,t_code=time_step,operation = 'PERLND').reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
+ implnds = total_phosphorous(uci,hbn,t_code=time_step,operation = 'IMPLND').reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
+ else:
+ perlnds = hbn.get_perlnd_constituent(constituent,time_step = time_step).reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
+ implnds = hbn.get_implnd_constituent(constituent,time_step = time_step).reset_index().melt(id_vars = ['index'],var_name = 'OPNID')

- # For each subwatershed the average loading rate for a specific month
+ perlnds['OPERATION'] = 'PERLND'
+ implnds['OPERATION'] = 'IMPLND'
+
+ df = pd.concat([perlnds,implnds],axis=0)
+
+ #df = df.groupby(['OPNID','OPERATION'])['value'].mean().reset_index()
+

+ # units = 'lb/acre'
+ if constituent == 'Q':
+ df.loc[:, 'value'] = df['value']/12 # convert to ft/acre/month
+ # units = 'ft/acre'

- # '''
+ # df['unit'] = units
+ # df.rename(columns = {'index':'datetime','value': 'loading_rate'},inplace = True)
+ # df['constituent'] = constituent
+ # df['time_step'] = time_step
+ # df['year'] = pd.DatetimeIndex(df['datetime']).year
+ # df['month'] = pd.DatetimeIndex(df['datetime']).month
+
+
+ subwatersheds = uci.network.subwatersheds().reset_index()
+ subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
+ areas = catchment_areas(uci)
+
+ df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='right')
+ df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
+ df['load'] = df['value']*df['AFACTR']
+ df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
+ df['constituent'] = constituent
+ return df[['index','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
+
+
+ def get_catchment_loading(uci,hbn,constituent,by_landcover = False):
+ df = get_constituent_loading(uci,hbn,constituent)
+ if not by_landcover:
+ df = df.groupby(['TVOLNO','constituent'])[['landcover_area','load']].sum().reset_index()
+ df['loading_rate'] = df['load']/df['landcover_area']
+ return df
+
+ def get_watershed_loading(uci,hbn,reach_ids,constituent,upstream_reach_ids = None,by_landcover = False):
+ reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
+
+ df = get_constituent_loading(uci,hbn,constituent)
+ df = df.loc[df['TVOLNO'].isin(reach_ids)]
+
+ if by_landcover:
+ df = df.groupby(['landcover','constituent'])[['landcover_area','load']].sum().reset_index()
+ df['loading_rate'] = df['load']/df['landcover_area']
+ else:
+ df = df.groupby(['constituent'])[['landcover_area','load']].sum().reset_index()
+ df['loading_rate'] = df['load']/df['landcover_area']
+
+ return df
+
+
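The loading pipeline above is essentially an area-weighted sum: each PERLND/IMPLND loading rate is multiplied by the acreage (AFACTR) it contributes to a reach, summed per catchment (or per landcover), and divided back by the summed area to recover an aggregate rate. A self-contained toy illustration (made-up numbers, not model output):

    import pandas as pd

    # two land segments draining to the same reach (TVOLNO 101)
    toy = pd.DataFrame({'TVOLNO': [101, 101],
                        'landcover': ['row_crop', 'forest'],
                        'landcover_area': [300.0, 700.0],      # acres (AFACTR)
                        'loading_rate': [1.2, 0.1]})           # lb/acre
    toy['load'] = toy['loading_rate']*toy['landcover_area']    # 360 lb and 70 lb
    catchment = toy.groupby('TVOLNO')[['landcover_area', 'load']].sum()
    catchment['loading_rate'] = catchment['load']/catchment['landcover_area']   # 0.43 lb/acre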
+ #%% Contributions
+ allocation_selector = {'Q': {'input': ['IVOL'],
+ 'output': ['ROVOL']},
+ 'TP': {'input': ['PTOTIN'],
+ 'output': ['PTOTOUT']},
+ 'TSS': {'input': ['ISEDTOT'],
+ 'output': ['ROSEDTOT']},
+ 'OP': {'input': ['PO4INDIS'],
+ 'output': ['PO4OUTDIS']},
+ 'N': {'input': ['NO3INTOT','NO2INTOT'],
+ 'output': ['NO2OUTTOT','NO3OUTTOT']},
+ 'TKN': {'input': ['TAMINTOT','NTOTORGIN'],
+ 'output': ['TAMOUTTOT', 'NTOTORGOUT']}
+ }
+
+ def channel_inflows(constituent,uci,hbn,t_code,reach_ids = None):
+ load_in = sum([hbn.get_multiple_timeseries('RCHRES',
+ t_code,
+ t_cons,
+ opnids = reach_ids)
+ for t_cons in allocation_selector[constituent]['input']])
+
+ if constituent == 'TSS':
+ load_in = load_in*2000
+
+ return load_in
+
+ def channel_outflows(constituent,uci,hbn,t_code,reach_ids = None):
+ load_out = sum([hbn.get_multiple_timeseries('RCHRES',
+ t_code,
+ t_cons,
+ opnids = reach_ids)
+ for t_cons in allocation_selector[constituent]['output']])
+ if constituent == 'TSS':
+ load_out = load_out*2000
+ return load_out
+
+ def channel_fate(constituent,uci,hbn,t_code,reach_ids = None):
+ load_in = channel_inflows(constituent,uci,hbn,t_code,reach_ids)
+ load_out = channel_outflows(constituent,uci,hbn,t_code,reach_ids)
+ return load_out/load_in
+
+
+ def local_loading(constituent,uci,hbn,t_code,reach_ids = None):
+ load_in = channel_inflows(constituent,uci,hbn,t_code,reach_ids)
+ load_out = channel_outflows(constituent,uci,hbn,t_code,reach_ids)
+ df = pd.DataFrame({reach_id: load_in[reach_id] - load_out[uci.network.upstream(reach_id)].sum(axis=1) for reach_id in load_in.columns})
+ return df
+
+
+
+ def catchment_contributions(uci,hbn,constituent,target_reach_id, landcover = None):
+ p = uci.network.paths(target_reach_id)
+ p[target_reach_id] = [target_reach_id]
+ fate = channel_fate(constituent,uci,hbn,5)
+ fate_factors = pd.concat([fate[v].prod(axis=1) for k,v in p.items()],axis=1)
+ fate_factors.columns = list(p.keys())
+
+ fate_factors = fate_factors.reset_index().melt(id_vars = 'index')
+
+ df = get_catchment_loading(uci,hbn,constituent,by_landcover = True)
+ df = pd.merge(df,fate_factors,left_on = ['TVOLNO','index'],right_on = ['variable','index'])
+
+ df['contribution'] = df['value']*df['load']
+
+ target_load = channel_outflows(constituent,uci,hbn,5,[target_reach_id])
+
+ df = pd.merge(df,target_load.reset_index().melt(id_vars='index',var_name = 'target_reach',value_name = 'target_load'),left_on='index',right_on='index')
+ df['contribution_perc'] = df['contribution']/(df['target_load'])*100
+
+ df = df.groupby(['TVOLNO','landcover','landcover_area'])[['load','contribution','contribution_perc','target_load']].mean().reset_index()
+
+ if landcover is not None:
+ df = df.loc[df['landcover'] == landcover]
+
+ else:
+ df = df.groupby(['TVOLNO',])[['landcover_area','load','contribution','contribution_perc']].sum().reset_index()
+
+ return df
+
+ def total_contributions(constituent,uci,hbn,target_reach_id, as_percent = True):
+ p = uci.network.paths(target_reach_id)
+ p[target_reach_id] = [target_reach_id]
+ fate = channel_fate(constituent,uci,hbn,5)
+ loads = local_loading(constituent,uci,hbn,5)
+ fate_factors = pd.concat([fate[v].prod(axis=1) for k,v in p.items()],axis=1)
+ fate_factors.columns = list(p.keys())
+ loads = loads[loads.columns.intersection(fate_factors.columns)]
+ contribution = loads[fate_factors.columns].mul(fate_factors.values)
+ #allocations = loads.mul(loss_factors[loads.columns.get_level_values('reach_id')].values)
+
+ target_load = channel_outflows(constituent,uci,hbn,5,[target_reach_id])
+
+
+ df = contribution.mean().to_frame().reset_index()
+ df.columns = ['TVOLNO','contribution']
+
+ df['load'] = loads.mean().values
+ df['contribution_perc'] = (contribution.div(target_load.values)*100).mean().values
+ return df[['TVOLNO','load','contribution','contribution_perc']]
+
+ #%% LEGACY Catchment Loading (ie. Direct contributions from perlnds/implnds, no losses)
+ # Q
+ # Subwatershed Weighted Mean Timeseries Output
+ # operation = 'PERLND'
+ # ts_name = 'PERO',
+ # time_code = 5
+

  def avg_subwatershed_loading(constituent,t_code,uci,hbn):
  dfs = []
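The contribution logic added above can be read as a routing calculation: channel_fate gives each reach's pass-through ratio (outflow/inflow), and a catchment's delivered contribution is its local load multiplied by the product of those ratios along the path to the target reach (as returned by uci.network.paths), then expressed as a share of the target's outflow. A toy illustration with made-up numbers:

    # hypothetical path from catchment reach 101 through reaches 102 and 103 to the target
    local_load_lb = 100.0
    fate_102, fate_103 = 0.8, 0.9                          # out/in pass-through for each downstream reach
    delivered = local_load_lb*fate_102*fate_103            # 72 lb reaches the target
    target_outflow_lb = 600.0
    contribution_perc = 100*delivered/target_outflow_lb    # 12 %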
@@ -235,8 +409,7 @@ def avg_subwatershed_loading(constituent,t_code,uci,hbn):
  loading_rates.append(df.loc[subwatershed.index].sum().agg(agg_func)/subwatershed['AFACTR'].sum())


-
-
+
  def weighted_describe(df, value_col, weight_col):
  weighted_mean = (df[value_col] * df[weight_col]).sum() / df[weight_col].sum()
  weighted_var = ((df[value_col] - weighted_mean) ** 2 * df[weight_col]).sum() / df[weight_col].sum()
@@ -325,6 +498,7 @@ def ann_avg_subwatershed_loading(constituent,uci,hbn):
  return df

  def ann_avg_watershed_loading(constituent,reach_ids,uci,hbn, by_landcover = False):
+ reach_ids = [item for sublist in [uci.network._upstream(reach_id) for reach_id in reach_ids] for item in sublist]
  df = ann_avg_constituent_loading(constituent,uci,hbn)
  df = df.loc[df['TVOLNO'].isin(reach_ids)]
  if by_landcover:
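The added line flattens the per-reach upstream lists into one list of reach IDs. An equivalent, somewhat more readable form (assuming uci.network._upstream returns a list of reach IDs, as the surrounding code implies) would be:

    from itertools import chain
    reach_ids = list(chain.from_iterable(uci.network._upstream(reach_id) for reach_id in reach_ids))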
@@ -334,244 +508,7 @@ def ann_avg_watershed_loading(constituent,reach_ids,uci,hbn, by_landcover = Fals
  df = weighted_describe(df,constituent,'AFACTR')

  return df
-
-

- # ds = xr.
- # coords = ['time']
- # dims = ['operation','activity','opnid','time_step','time','catchment_id']
- # def _insert_col(col_name,value,df):
- # if col_name not in df.columns:
- # df.insert(0,col_name,value)
-
- # dfs = []
- # for hbn in hbns.hbns:
- # for key, df in hbn.data_frames.items():
- # operation,activity,opnid,t_code = key.split('_')
- # t_code = int(t_code)
- # opnid = int(opnid)
- # df = hbn.data_frames[key]
- # df.index.name = 'date'
- # df.index = df.index.tz_localize(None)
- # _insert_col('t_code',t_code,df)
- # _insert_col('OPNID',opnid,df)
- # _insert_col('activity',activity,df)
- # df = df.reset_index().set_index(['date','OPNID','t_code','activity'])
- # dfs.append(xr.Dataset.from_dataframe(df))
-
- # ds = xr.merge(dfs)
-
- # query = {
- # 'date': (142.41, 142.51),
- # 'y': (-32.22, -32.32),
- # 'time': ('2015-01-01', '2016-12-31'),
- # 'measurements': ['nbart_nir', 'fmask'],
- # 'output_crs': 'EPSG:3577',
- # 'resolution': (-30, 30)
- # }
-
- # dfs = []
- # for activity, ts_names in hbn.output_names().items():
- # dfs
-
- # for hbn in hbn.hbns: data_frames['PERLND_SEDMNT_201_5']
-
-
- # time_steps = [2,3,4,5]
- # operations = ['PERLND','IMPLND','RCHRES']
-
-
-
- # # def flow_loading(uci,hbn,reach_ids,time_step='yearly',weighted = True):
-
- # t_con = 'PERO'
- # t_opn = 'PERLND'
- # time_step = 'yearly'
- # activity = 'PWATER'
-
-
-
- #def total_phosphorous_loading:
- # def phosphorous_loading(uci,hbns,reach_ids,time_tep = 'yearly'):
- # catchments = get_catchments(uci,reach_ids)
- # df = total_phosphorous(uci,hbns)
-
- # subwatershed = uci.network.subwatershed(reach_id)
- # perlnds = subwatershed.loc[subwatershed['SVOL'] == 'PERLND']
- # perlnds = perlnds.set_index('SVOLNO').drop_duplicates()
- # mlno = subwatershed.loc[subwatershed['SVOL'] == 'PERLND','MLNO'].iloc[0]
- # total = total_phosphorous(uci,hbn,mlno,t_code,perlnds.index)
-
-
-
-
- #%% Landscape Yields
-
- def yield_flow(uci,hbn,constituent,reach_id):
- hbn.get_rchres_data('Q',reach_id,'cfs','yearly')/uci.network.drainage_area(reach_id)
-
-
- def yield_sediment(uci,hbn,constituent,reach_id):
- hbn.get_rchres_data('TSS',reach_id,'lb','yearly').mean()*2000/uci.network.drainage_area(reach_id)
-
- def avg_ann_yield(uci,hbn,constituent,reach_ids):
- #reach_ids = uci.network.G.nodes
-
-
- _reach_ids = [uci.network._upstream(reach) for reach in reach_ids]
- _reach_ids = list(set([num for row in _reach_ids for num in row]))
- subwatersheds = uci.network.subwatersheds().loc[_reach_ids]
- area = subwatersheds['AFACTR'].sum()
-
- if constituent == 'Q':
- units = 'acrft'
- else:
- units = 'lb'
-
- df = hbn.get_reach_constituent(constituent,reach_ids,5,unit =units).mean() # Gross
-
- return df/area
-
-
- #%% Allocations
- allocation_selector = {'Q': {'input': ['IVOL'],
- 'output': ['ROVOL']},
- 'TP': {'input': ['PTOTIN'],
- 'output': ['PTOTOUT']},
- 'TSS': {'input': ['ISEDTOT'],
- 'output': ['ROSEDTOT']},
- 'OP': {'input': ['PO4INDIS'],
- 'output': ['PO4OUTDIS']},
- 'N': {'input': ['NO3INTOT','NO2INTOT'],
- 'output': ['NO2OUTTOT','NO3OUTTOT']},
- 'TKN': {'input': [],
- 'output': ['TAMOUTTOT', 'NTOTORGOUT']}
- }
-
- def fate(hbn,constituent,t_code,reach_ids = None):
- if constituent == 'Q':
- fate_in = hbn.get_multiple_timeseries('RCHRES',t_code,'ROVOL',opnids=reach_ids)
- fate_out = hbn.get_multiple_timeseries('RCHRES',t_code,'IVOL',opnids=reach_ids)
- elif constituent == 'TP':
- fate_in = hbn.get_multiple_timeseries('RCHRES',t_code,'PTOTOUT',opnids = reach_ids)
- fate_out = hbn.get_multiple_timeseries('RCHRES',t_code,'PTOTIN',opnids = reach_ids)
- elif constituent == 'TSS':
- fate_in = hbn.get_multiple_timeseries('RCHRES',t_code,'ISEDTOT',opnids = reach_ids)
- fate_out = hbn.get_multiple_timeseries('RCHRES',t_code,'ROSEDTOT',opnids = reach_ids)
- return fate_out/fate_in
-
- def loading(uci,hbn,constituent,t_code = 5):
- if constituent =='TP':
- loads = total_phosphorous(uci,hbn,t_code=t_code)
- else:
- #dfs = []
- # df_implnd = hbn.get_implnd_constituent(constituent,t_code,'lb').T.reset_index().rename(columns = {'index':'OPNID'})
- # df_implnd['SVOL'] = 'IMPLND'
-
- loads = hbn.get_perlnd_constituent(constituent,t_code,'lb')
-
-
- # .T.reset_index().rename(columns = {'index':'OPNID'})
- # df_perlnd['SVOL'] = 'PERLND'
-
- # df = pd.concat([df_perlnd,df_implnd])
- # df.set_index(['SVOL','OPNID'],inplace=True)
-
- if constituent == 'TSS':
- loads = loads*2000
-
- return loads
-
- def subwatershed_loading(uci,hbn,constituent,t_code,group_landcover = True,as_load = True):
- loads = loading(uci,hbn,constituent,t_code)
-
- subwatersheds = uci.network.subwatersheds()
- perlnds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND'].reset_index()
-
- total = loads[perlnds['SVOLNO'].to_list()]
- total = total.mul(perlnds['AFACTR'].values,axis=1)
- total = total.transpose()
- total['reach_id'] = perlnds['TVOLNO'].values
- total['landcover'] = uci.table('PERLND','GEN-INFO').loc[total.index,'LSID'].to_list()
- total['area'] = perlnds['AFACTR'].to_list() #perlnds.loc[total.index,'AFACTR'].to_list()
- total = total.reset_index().set_index(['index','landcover','area','reach_id']).transpose()
- total.columns.names = ['perlnd_id','landcover','area','reach_id']
-
- if group_landcover:
- total.columns = total.columns.droplevel(['landcover','perlnd_id'])
- total = total.T.reset_index().groupby('reach_id').sum().reset_index().set_index(['reach_id','area']).T
-
- if not as_load:
- total = total.div(total.columns.get_level_values('area').values,axis=1)
-
- total.index = pd.to_datetime(total.index)
- return total
-
-
- def losses(uci,hbn,constituent, t_code = 5):
- upstream_reachs = {reach_id: uci.network.upstream(reach_id) for reach_id in uci.network.get_node_type_ids('RCHRES')}
- totout = sum([hbn.get_multiple_timeseries('RCHRES',
- t_code,
- t_cons,
- opnids = list(upstream_reachs.keys()))
- for t_cons in allocation_selector[constituent]['output']])
-
- totin = sum([hbn.get_multiple_timeseries('RCHRES',
- t_code,
- t_cons,
- opnids = list(upstream_reachs.keys()))
- for t_cons in allocation_selector[constituent]['input']])
-
-
- #totin = totout.copy().astype('Float64')
- #totin[:] = pd.NA
-
- for reach_id in totin.columns:
- reach_ids = upstream_reachs[reach_id]
- if len(reach_ids) > 0:
- totin[reach_id] = totout[reach_ids].sum(axis=1)
-
- #totin.columns = totout.columns
- return (totout-totin)/totin*100
-
- def allocations(uci,hbn,constituent,reach_id,t_code,group_landcover = True):
- p = uci.network.paths(reach_id)
- p[reach_id] = [reach_id]
- loss = losses(uci,hbn,constituent,t_code)
- loads = subwatershed_loading(uci,hbn,constituent,t_code,group_landcover = group_landcover)
- loss_factors = pd.concat([loss[v].prod(axis=1) for k,v in p.items()],axis=1)
- loss_factors.columns = list(p.keys())
- allocations = loads.mul(loss_factors[loads.columns.get_level_values('reach_id')].values)
- return allocations
-
-
- def total_phosphorous_losses(uci,hbn,t_code = 5):
- upstream_reachs = {reach_id: [reach_id] + uci.network.upstream(reach_id) for reach_id in uci.network.get_node_type_ids('RCHRES')}
- ptotout = hbn.get_multiple_timeseries('RCHRES',t_code,'PTOTOUT',opnids = list(upstream_reachs.keys()))
- ptotin = pd.concat([ptotout[reach_ids].sum(axis=1) for reach_id,reach_ids in upstream_reachs.items()],axis=1)
- ptotin.columns = list(upstream_reachs.keys())
- return 1-(ptotin-ptotout)/ptotin
-
-
- def total_phosphorous_allocations(uci,hbn,reach_id,t_code=5,group_landcover = True):
- p = uci.network.paths(reach_id)
- p[reach_id] = [reach_id]
- losses = total_phosphorous_losses(uci,hbn,t_code)
- loads = subwatershed_total_phosphorous_loading(uci,hbn,t_code=t_code,group_landcover = group_landcover)
- loss_factors = pd.concat([losses[v].prod(axis=1) for k,v in p.items()],axis=1)
- loss_factors.columns = list(p.keys())
- allocations = loads.mul(loss_factors[loads.columns.get_level_values('reach_id')].values)
- return allocations
-
- #loads[loads.index.get_level_values('reach_id').isin(loss_factors.columns)].mul(loss_factors.values,axis=1)
- #return loads[loss_factors.columns].mul(loss_factors.values,axis=1)
-
-
- def flow_allocations(uci,hbn,reach_id,t_code = 5):
- raise NotImplementedError()
-
- def total_suspended_sediment_allocations(uci,hbn,reach_id,t_code):
- raise NotImplementedError()

  #%% Water Balance
  def pevt_balance(mod,operation,opnid):
@@ -789,139 +726,9 @@ def avg_annual_precip(uci,wdm):


  #%%
- #%%% Report Tablewater_s
+ #%%% Other Reports


- def landcover_areas(uci):
- df = uci.network.operation_area('PERLND').groupby('LSID').sum()
- df['percent'] = 100*(df['AFACTR']/df['AFACTR'].sum())
- return df
-
- # def area_weighted_output(uci,hbn,ts_name,operation,time_step,opnids):
- # assert(operation in ['PERLND','IMPLND'])
- # df = hbn.get_multiple_timeseries(operation,5,ts_name,opnids = opnids).T
- # df.index.name = 'SVOLNO'
- # areas = uci.network.operation_area(operation)
- # df = df.join(areas).reset_index().set_index(['SVOLNO','AFACTR','LSID'])
- # df = df.T*df.index.get_level_values('AFACTR').values
-
- # if grouped:
- # df.columns.get_level_values('AFACTR').groupby(df.get_level_values['LSIDE'])
-
- '''
- Output for each PERLND
- - Sometimes a rate
- - Sometimes a mass or volume
-
- - Group by Landcover no without weighting
- - rate take the mean
- - mass or volum us the sum
- - Group by Landcover with weighting
- - rate convert to mass/volume sum then divide by grouped area
- - mass sum then divide by grouped area
-
-
- Output for a catchment
- - For a single catchment
- - if timeseries is a rate
- - rate is raw output
- - mass/volume is rate * area of contributing operations
- - if timeseries is a mass/volume
- - rate is mass/volume / area of contributing operations
- - mass/volume is raw output
- - No ability to aggregate by Landcover
- - For 2 or more catchments
- - if weighted
- - if timeseries is a rate
- - rate is rate*area of contributing operations summed by landcover and divided by each landcover area
- - mass/volume is rate*area summed by landcover and area
- - if timeseries is a mass/volume
- - rate is mass/volume summed by landcover and divided by landcover area
- - mass/volume is mass/volume summed by landcover
- - if not weighted
- - if timeseries is a rate
- - rate is the raw output of each catchment concatenated
- - mass/volume is rate*area of each contributing landcover and concatenated for each catchment
- - if timeseries is a mass/volume
- - rate is mass/volume / area of each contributing landcover and concatenated for each catchment
- - mass/volume is raw output of each chatchment concatenated
-
-
- '''
-
- # class Catchment:
- # def __init__(reach_id,uci,hbn = None):
- # id = reach_id
-
- # def loading_rate(constituent):
-
- # def loading(constituent):
-
- # def yield(constituent):
-
- # def load(constituent):
-
-
-
-
- '''
- The area of each landcategory in the catchment
-
- Loading rate of each landuse (lb/acre/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- Loading of from each landuse (lb/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- Yield at the catchment outlet (lb/acr/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- Load at the catchment outlet (lb/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- In channel losses of a constituent (lb/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- Allocation of a constituent from catchment to downstream catchment
- TSS, TP, N, TKN, BOD, OP
-
-
-
-
- '''
-
- #reach_id = 103
- #def make_catchment(reach_id,uci,hbn):
-
-
-
-
- # class Reach:
-
-
- # class Perlnd():
- # def __init__(catchment_id,perlnd_id,area,mlno,landcover,metzone):
-
-
-
- # # class Implnd:
- # def annual_weighted_perlnd_output(uci,hbn,ts_name,t_code = 4,opnids = None):
-
- # df = hbn.get_multiple_timeseries('PERLND',5,ts_name,opnids = opnids)
- # subwatersheds = uci.network.subwatersheds().reset_index()
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND']
- # df = df[subwatersheds['SVOLNO']].T
- # df = pd.merge(df, subwatersheds, left_index = True, right_on='SVOLNO', how='inner')
- # df = df.set_index(['TVOLNO','SVOL','SVOLNO','AFACTR','LSID','MLNO']).T
-
- # def annual_weighted_output(ts_name,operation,opnids):
- # assert(operation in ['PERLND','IMPLND'])
- # subwatersheds = uci.network.subwatersheds()
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation].reset_index()
- # df = cal.model.hbns.get_multiple_timeseries('PERLND',5,'PERO',test['SVOLNO'].values).mean().reset_index()
- # df.columns = ['OPNID','value']
- # df = pd.merge(subwatersheds,df,left_on = 'SVOLNO', right_on='OPNID')
- # weighted_mean = df.groupby('TVOLNO').apply(lambda x: (x['value'] * x['AFACTR']).sum() / x['AFACTR'].sum())


  def weighted_mean(df,value_col,weight_col):
@@ -930,9 +737,9 @@ def weighted_mean(df,value_col,weight_col):
  'AFACTR' : df[weight_col].sum(),
  value_col: [weighted_mean]})

- def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None,group_by = None):
+ def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnids = None,group_by = None):
  assert (group_by in [None,'landcover','opnid'])
- df = hbn.get_multiple_timeseries(operation,5,ts_name,opnids = opnids).mean().reset_index()
+ df = hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids).mean().reset_index()
  df.columns = ['SVOLNO',ts_name]
  subwatersheds = uci.network.subwatersheds().reset_index()
  subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation]
@@ -951,7 +758,6 @@ def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None,gr
  df = df.set_index([df.index,'AFACTR'])
  return df

-

  def monthly_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None, as_rate = False, by_landcover = True, months = [1,2,3,4,5,6,7,8,9,10,11,12]):
  df = hbn.get_multiple_timeseries(operation,4,ts_name,opnids = opnids)
@@ -1096,7 +902,7 @@ def subwatershed_weighted_output(uci,hbn,reach_ids,ts_name,time_step,by_landcove



- #%% Phosphorous Loading
+ #%% Phosphorous Loading Calculations
  def subwatershed_total_phosphorous_loading(uci,hbn,reach_ids = None,t_code=5, as_load = True,group_landcover = True):
  tp_loading = total_phosphorous(uci,hbn,t_code)
  if reach_ids is None:
@@ -1130,21 +936,21 @@ def subwatershed_total_phosphorous_loading(uci,hbn,reach_ids = None,t_code=5, as
  total.index = pd.to_datetime(total.index)
  return total

- def total_phosphorous(uci,hbn,t_code):
+ def total_phosphorous(uci,hbn,t_code,operation = 'PERLND'):
  #assert(isinstance(perlnd_ids (int,list,None)))
- perlnds = uci.network.subwatersheds()
- perlnds = perlnds.loc[perlnds['SVOL'] == 'PERLND'].drop_duplicates(subset = ['SVOLNO','MLNO'])
+ opnids = uci.network.subwatersheds()
+ opnids = opnids.loc[opnids['SVOL'] == operation].drop_duplicates(subset = ['SVOLNO','MLNO'])

  totals = []
- for mlno in perlnds['MLNO'].unique():
- perlnd_ids = perlnds['SVOLNO'].loc[perlnds['MLNO'] == mlno].to_list()
- total = dissolved_orthophosphate(uci,hbn,mlno,t_code) + particulate_orthophosphate(uci,hbn,mlno, t_code) + organic_refactory_phosphorous(uci,hbn,mlno,t_code) + labile_oxygen_demand(uci,hbn,mlno,t_code)*0.007326 # Conversation factor to P
- totals.append(total[perlnd_ids])
+ for mlno in opnids['MLNO'].unique():
+ total = dissolved_orthophosphate(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate(uci,hbn,operation,mlno, t_code) + organic_refactory_phosphorous(uci,hbn,operation,mlno,t_code) + labile_oxygen_demand(uci,hbn,operation,mlno,t_code)*0.007326 # Conversation factor to P
+ totals.append(total[opnids['SVOLNO'].loc[opnids['MLNO'] == mlno].to_list()])

  total = pd.concat(totals,axis=1)
  total = total.T.groupby(total.columns).sum().T
  return total

+
  MASSLINK_SCHEME = {'dissolved_orthophosphate': {'tmemn': 'NUIF1',
  'tmemsb1': '4',
  'tmemsb2':''},
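In plain terms, the TP total assembled by total_phosphorous is the sum of the masslinked fluxes listed in MASSLINK_SCHEME. A rough sketch (the variable names below are illustrative only; 0.007326 is the BOD-to-phosphorus conversion factor used in the code above):

    # tp = dissolved PO4 + particulate PO4 (sand + silt + clay) + refractory organic P + 0.007326 * labile BOD
    tp = po4_dis + (po4_sand + po4_silt + po4_clay) + org_refractory_p + 0.007326*labile_bod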
@@ -1168,12 +974,13 @@ MASSLINK_SCHEME = {'dissolved_orthophosphate': {'tmemn': 'NUIF1',
  'tmemsb2':''}}


- def qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):
+ def qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):
  masslink = uci.table('MASS-LINK',f'MASS-LINK{mlno}')
  masslink = masslink.loc[(masslink['TMEMN'] == tmemn) & (masslink['TMEMSB1'] == tmemsb1) & (masslink['TMEMSB2'] == tmemsb2)]
+ masslink.fillna({'MFACTOR': 1}, inplace=True)
  ts = 0
  for index,row in masslink.iterrows():
- hbn_name = uci.table('PERLND','QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
+ hbn_name = uci.table(operation,'QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
  hbn_name = row['SMEMN'] + hbn_name
  mfactor = row['MFACTOR']
  ts = hbn.get_multiple_timeseries(row['SVOL'],t_code,hbn_name)*mfactor + ts
@@ -1182,49 +989,48 @@ def qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):



- def dissolved_orthophosphate(uci,hbn,mlno,t_code = 4):
+ def dissolved_orthophosphate(uci,hbn,operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['dissolved_orthophosphate']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['dissolved_orthophosphate']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['dissolved_orthophosphate']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
-
- def particulate_orthophosphate(uci,hbn,mlno,t_code = 4):
- ts = particulate_orthophosphate_sand(uci,hbn,mlno,t_code) + particulate_orthophosphate_silt(uci,hbn,mlno,t_code) + particulate_orthophosphate_clay(uci,hbn,mlno,t_code)
- return ts
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def particulate_orthophosphate_sand(uci,hbn, mlno,t_code = 4):
+ def particulate_orthophosphate_sand(uci,hbn,operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['particulate_orthophosphate_sand']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['particulate_orthophosphate_sand']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['particulate_orthophosphate_sand']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def particulate_orthophosphate_silt(uci,hbn, mlno,t_code = 4):
+ def particulate_orthophosphate_silt(uci,hbn,operation, mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['particulate_orthophosphate_silt']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['particulate_orthophosphate_silt']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['particulate_orthophosphate_silt']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def particulate_orthophosphate_clay(uci,hbn, mlno,t_code = 4):
+ def particulate_orthophosphate_clay(uci,hbn, operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['particulate_orthophosphate_clay']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['particulate_orthophosphate_clay']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['particulate_orthophosphate_clay']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def organic_refactory_phosphorous(uci,hbn, mlno,t_code = 4):
+ def organic_refactory_phosphorous(uci,hbn, operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['organic_refactory_phosphorous']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['organic_refactory_phosphorous']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['organic_refactory_phosphorous']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def organic_refactory_carbon(uci,hbn, mlno,t_code = 4):
+ def organic_refactory_carbon(uci,hbn, operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['organic_refactory_carbon']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['organic_refactory_carbon']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['organic_refactory_carbon']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def labile_oxygen_demand(uci,hbn,mlno,t_code = 4):
+ def labile_oxygen_demand(uci,hbn,operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['labile_oxygen_demand']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['labile_oxygen_demand']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['labile_oxygen_demand']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

+ def particulate_orthophosphate(uci,hbn,operation,mlno,t_code = 4):
+ ts = particulate_orthophosphate_sand(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate_silt(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate_clay(uci,hbn,operation,mlno,t_code)
+ return ts
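Finally, a minimal usage sketch of the reworked phosphorus helper; the operation argument is the change introduced in this version, and the uci/hbn names below stand in for the parsed model objects used throughout reports.py.

    # hypothetical inputs; signatures match the code above
    tp_perlnd = total_phosphorous(uci, hbn, t_code=5, operation='PERLND')   # yearly TP by pervious segment
    tp_implnd = total_phosphorous(uci, hbn, t_code=5, operation='IMPLND')   # yearly TP by impervious segment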