hspf 2.1.0__py3-none-any.whl → 2.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hspf/reports.py CHANGED
@@ -6,7 +6,7 @@ Created on Mon Apr 11 08:26:04 2022
  """
  import numpy as np
  import pandas as pd
- from . import helpers
+ from hspf import helpers
  from pathlib import Path

  #timeseries_catalog = pd.read_csv(Path(__file__).parent/'TIMESERIES_CATALOG.csv')
@@ -37,22 +37,31 @@ class Reports():
  else:
  return annual_reach_water_budget(self.uci,self.hbns)

- # def annual_runoff(self):
- # #assert operation in ['PERLND','IMPLND']
- # #if operation == 'PERLND':
- # return annual_perlnd_runoff(self.uci,self.hbns)
- # #else:
- # # raise NotImplementedError()
+ def watershed_loading(self,constituent,reach_ids,upstream_reach_ids = None,by_landcover = False):
+ '''
+ Calculate the loading to channels from a watershed.

- # def monthly_runoff(self,landcover=None):
- # df = monthly_perlnd_runoff(self.uci,self.hbns).unstack().T
- # if landcover is None:
- # return df
- # else:
- # return df.loc[landcover]
-
- def annual_sediment_budget(self):
- return annual_sediment_budget(self.uci,self.hbns)
+ Parameters
+ ----------
+ constituent : str
+ Constituent to calculate loading for (e.g. 'TP', 'TSS', 'N', 'OP', 'Q', 'TKN')
+ reach_ids : list
+ List of reach IDs defining the watershed outlet
+ upstream_reach_ids : list, optional
+ List of reach IDs defining the upstream boundary of the watershed. The default is None.
+ by_landcover : bool, optional
+ If True, returns loading by landcover type. The default is False.
+ '''
+ return get_watershed_loading(self.uci,self.hbns,reach_ids,constituent,upstream_reach_ids,by_landcover)
+
+ def catchment_loading(self,constituent,by_landcover = False):
+ return get_catchment_loading(self.uci,self.hbns,constituent,by_landcover)
+
+ def contributions(self,constituent,target_reach_id):
+ return total_contributions(constituent,self.uci,self.hbns,target_reach_id)
+
+ def landcover_contributions(self,constituent,target_reach_id,landcover = None):
+ return catchment_contributions(self.uci,self.hbns,constituent,target_reach_id)

  def ann_avg_subwatershed_loading(self,constituent):
  return ann_avg_subwatershed_loading(constituent,self.uci, self.hbns)
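For orientation, a minimal usage sketch of the loading and contribution reports added in this release. The object and reach names below are illustrative assumptions, not values taken from the package; it presumes a `Reports` instance already wired to a parsed UCI file, HBN binary output, and WDM files.

```python
# Hypothetical usage sketch -- model objects and reach IDs are illustrative.
from hspf.reports import Reports

reports = Reports(uci, hbns, wdms)  # assumed construction from parsed model files

# Loading delivered to the channels draining to reaches 101-103, by landcover.
tp_by_landcover = reports.watershed_loading('TP', reach_ids=[101, 102, 103],
                                            by_landcover=True)

# Direct loading generated within each catchment (no in-channel losses).
tss_by_catchment = reports.catchment_loading('TSS')

# Share of the load leaving reach 103 attributable to each upstream catchment.
tp_contributions = reports.contributions('TP', target_reach_id=103)
```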
@@ -68,33 +77,17 @@ class Reports():
  total['share'] = total['volume_percent']/total['area_percent']
  return total

- # def monthly_avg_subwatershed_loading(self,constituent,month):
- # return monthly_avg_subwatershed_loading(constituent,month,self.uci, self.hbns)
-
- # def monthly_avg_watershed_loading(self,constituent,reach_ids,month,by_landcover = True):
- # return monthly_avg_watershed_loading(constituent,reach_ids,month,self.uci, self.hbns,by_landcover = by_landcover)
-
-
- def ann_avg_yield(self,constituent,reach_ids):
- df= avg_ann_yield(self.uci,self.hbns,constituent,reach_ids)
+ def ann_avg_yield(self,constituent,reach_ids,upstream_reach_ids = None):
+ df= avg_ann_yield(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids)
  return df

  def annual_precip(self):
  return avg_annual_precip(self.uci,self.wdms)

- # def water_balance(self,reach_ids = None):
- # if reach_ids is None:
- # reach_ids = self.uci.network.outlets()
- # return water_balance(self.uci,self.hbns,self.wdms,reach_ids)
-
  def simulated_et(self):
  return simulated_et(self.uci,self.hbns)

- # def inflows(self):
- # return inflows(self.uci,self.wdms)
-
-
-
+

  #%% Channel Reports
  def scour(hbn,uci,start_year = '1996',end_year = '2030'):
@@ -132,6 +125,8 @@ def scour(hbn,uci,start_year = '1996',end_year = '2030'):
  # schematic block will have all the possible perlands while sosed only has perlands that were simulated
  # in other words information from sosed is a subset of schematic
  for tvolno in lakeflag.index: #schematic['TVOLNO'].unique():
+ implnd_load = 0
+ prlnd_load = 0
  reach_load = depscr.loc[tvolno].values[0]
  schem_sub = schematic[schematic['TVOLNO'] == tvolno]
  if len(schem_sub) == 0:
@@ -175,37 +170,218 @@ def get_catchments(uci,reach_ids):
  return landcover


- #%% Catchment Loading (ie. Direct contributions from perlnds/implnds, no losses)
- # Q
- # Subwatershed Weighted Mean Timeseries Output
- # operation = 'PERLND'
- # ts_name = 'PERO',
- # time_code = 5

+ #%% Landscape Yields
+
+ def yield_flow(uci,hbn,constituent,reach_id):
+ hbn.get_rchres_data('Q',reach_id,'cfs','yearly')/uci.network.drainage_area(reach_id)
+
+
+ def yield_sediment(uci,hbn,constituent,reach_id):
+ hbn.get_rchres_data('TSS',reach_id,'lb','yearly').mean()*2000/uci.network.drainage_area(reach_id)
+
+ def avg_ann_yield(uci,hbn,constituent,reach_ids,upstream_reach_ids = None):
+ #reach_ids = uci.network.G.nodes
+
+ reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
+ area = uci.network.drainage_area(reach_ids,upstream_reach_ids)
+
+ if constituent == 'Q':
+ units = 'acrft'
+ else:
+ units = 'lb'
+
+ df = hbn.get_reach_constituent(constituent,reach_ids,5,unit =units).mean() # Gross
+
+ return df/area
+
+ #%% Catchment and Watershed Loading
+
+ def landcover_areas(uci):
+ df = uci.network.operation_area('PERLND').groupby('LSID').sum()
+ df['percent'] = 100*(df['AFACTR']/df['AFACTR'].sum())
+ return df

- LOADING_MAP = {'Q' : [{'t_opn':'PERLND',
- 't_con': 'PERO',
- 't_code': 'yearly',
- 'activity': 'PWATER'}],
- 'TSS': [{'t_opn':'PERLND',
- 't_con': 'SOSED',
- 't_code': 'yearly',
- 'activity': 'SEDMNT'},
- {'t_opn':'IMPLND',
- 't_con': 'SOSED',
- 't_code': 'yearly',
- 'activity': 'SEDMNT'}]}
+ def catchment_areas(uci):
+ df = uci.network.subwatersheds().reset_index()
+ df = df.groupby('TVOLNO')['AFACTR'].sum().reset_index()
+ df.rename(columns = {'AFACTR':'catchment_area'},inplace = True)
+ return df

+ def get_constituent_loading(uci,hbn,constituent,time_step = 5):

- # def annual_average_subwatershed_loading(constituent,uci,hbn,reach_ids):
- # '''

- # For each subwatershed the annual average loading rate
+ if constituent == 'TP':
+ perlnds = total_phosphorous(uci,hbn,t_code=time_step,operation = 'PERLND').reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
+ implnds = total_phosphorous(uci,hbn,t_code=time_step,operation = 'IMPLND').reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
+ else:
+ perlnds = hbn.get_perlnd_constituent(constituent,time_step = time_step).reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
+ implnds = hbn.get_implnd_constituent(constituent,time_step = time_step).reset_index().melt(id_vars = ['index'],var_name = 'OPNID')

- # For each subwatershed the average loading rate for a specific month
+ perlnds['OPERATION'] = 'PERLND'
+ implnds['OPERATION'] = 'IMPLND'
+
+ df = pd.concat([perlnds,implnds],axis=0)
+
+ #df = df.groupby(['OPNID','OPERATION'])['value'].mean().reset_index()
+

+ # units = 'lb/acre'
+ if constituent == 'Q':
+ df.loc[:, 'value'] = df['value']/12 # convert to ft/acre/month
+ # units = 'ft/acre'

- # '''
+ # df['unit'] = units
+ # df.rename(columns = {'index':'datetime','value': 'loading_rate'},inplace = True)
+ # df['constituent'] = constituent
+ # df['time_step'] = time_step
+ # df['year'] = pd.DatetimeIndex(df['datetime']).year
+ # df['month'] = pd.DatetimeIndex(df['datetime']).month
+
+
+ subwatersheds = uci.network.subwatersheds().reset_index()
+ subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
+ areas = catchment_areas(uci)
+
+ df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='right')
+ df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
+ df['load'] = df['value']*df['AFACTR']
+ df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
+ df['constituent'] = constituent
+ return df[['index','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
+
+
+ def get_catchment_loading(uci,hbn,constituent,by_landcover = False):
+ df = get_constituent_loading(uci,hbn,constituent)
+ if not by_landcover:
+ df = df.groupby(['TVOLNO','constituent'])[['landcover_area','load']].sum().reset_index()
+ df['loading_rate'] = df['load']/df['landcover_area']
+ return df
+
+ def get_watershed_loading(uci,hbn,reach_ids,constituent,upstream_reach_ids = None,by_landcover = False):
+ reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
+
+ df = get_constituent_loading(uci,hbn,constituent)
+ df = df.loc[df['TVOLNO'].isin(reach_ids)]
+
+ if by_landcover:
+ df = df.groupby(['landcover','constituent'])[['landcover_area','load']].sum().reset_index()
+ df['loading_rate'] = df['load']/df['landcover_area']
+ else:
+ df = df.groupby(['constituent'])[['landcover_area','load']].sum().reset_index()
+ df['loading_rate'] = df['load']/df['landcover_area']
+
+ return df
+
+
+ #%% Contributions
+ allocation_selector = {'Q': {'input': ['IVOL'],
+ 'output': ['ROVOL']},
+ 'TP': {'input': ['PTOTIN'],
+ 'output': ['PTOTOUT']},
+ 'TSS': {'input': ['ISEDTOT'],
+ 'output': ['ROSEDTOT']},
+ 'OP': {'input': ['PO4INDIS'],
+ 'output': ['PO4OUTDIS']},
+ 'N': {'input': ['NO3INTOT','NO2INTOT'],
+ 'output': ['NO2OUTTOT','NO3OUTTOT']},
+ 'TKN': {'input': ['TAMINTOT','NTOTORGIN'],
+ 'output': ['TAMOUTTOT', 'NTOTORGOUT']}
+ }
+
+ def channel_inflows(constituent,uci,hbn,t_code,reach_ids = None):
+ load_in = sum([hbn.get_multiple_timeseries('RCHRES',
+ t_code,
+ t_cons,
+ opnids = reach_ids)
+ for t_cons in allocation_selector[constituent]['input']])
+
+ if constituent == 'TSS':
+ load_in = load_in*2000
+
+ return load_in
+
+ def channel_outflows(constituent,uci,hbn,t_code,reach_ids = None):
+ load_out = sum([hbn.get_multiple_timeseries('RCHRES',
+ t_code,
+ t_cons,
+ opnids = reach_ids)
+ for t_cons in allocation_selector[constituent]['output']])
+ if constituent == 'TSS':
+ load_out = load_out*2000
+ return load_out
+
+ def channel_fate(constituent,uci,hbn,t_code,reach_ids = None):
+ load_in = channel_inflows(constituent,uci,hbn,t_code,reach_ids)
+ load_out = channel_outflows(constituent,uci,hbn,t_code,reach_ids)
+ return load_out/load_in
+
+
+ def local_loading(constituent,uci,hbn,t_code,reach_ids = None):
+ load_in = channel_inflows(constituent,uci,hbn,t_code,reach_ids)
+ load_out = channel_outflows(constituent,uci,hbn,t_code,reach_ids)
+ df = pd.DataFrame({reach_id: load_in[reach_id] - load_out[uci.network.upstream(reach_id)].sum(axis=1) for reach_id in load_in.columns})
+ return df
+
+
+
+ def catchment_contributions(uci,hbn,constituent,target_reach_id, landcover = None):
+ p = uci.network.paths(target_reach_id)
+ p[target_reach_id] = [target_reach_id]
+ fate = channel_fate(constituent,uci,hbn,5)
+ fate_factors = pd.concat([fate[v].prod(axis=1) for k,v in p.items()],axis=1)
+ fate_factors.columns = list(p.keys())
+
+ fate_factors = fate_factors.reset_index().melt(id_vars = 'index')
+
+ df = get_catchment_loading(uci,hbn,constituent,by_landcover = True)
+ df = pd.merge(df,fate_factors,left_on = ['TVOLNO','index'],right_on = ['variable','index'])
+
+ df['contribution'] = df['value']*df['load']
+
+ target_load = channel_outflows(constituent,uci,hbn,5,[target_reach_id])
+
+ df = pd.merge(df,target_load.reset_index().melt(id_vars='index',var_name = 'target_reach',value_name = 'target_load'),left_on='index',right_on='index')
+ df['contribution_perc'] = df['contribution']/(df['target_load'])*100
+
+ df = df.groupby(['TVOLNO','landcover','landcover_area'])[['load','contribution','contribution_perc','target_load']].mean().reset_index()
+
+ if landcover is not None:
+ df = df.loc[df['landcover'] == landcover]
+
+ else:
+ df = df.groupby(['TVOLNO',])[['landcover_area','load','contribution','contribution_perc']].sum().reset_index()
+
+ return df
+
+ def total_contributions(constituent,uci,hbn,target_reach_id, as_percent = True):
+ p = uci.network.paths(target_reach_id)
+ p[target_reach_id] = [target_reach_id]
+ fate = channel_fate(constituent,uci,hbn,5)
+ loads = local_loading(constituent,uci,hbn,5)
+ fate_factors = pd.concat([fate[v].prod(axis=1) for k,v in p.items()],axis=1)
+ fate_factors.columns = list(p.keys())
+ loads = loads[loads.columns.intersection(fate_factors.columns)]
+ contribution = loads[fate_factors.columns].mul(fate_factors.values)
+ #allocations = loads.mul(loss_factors[loads.columns.get_level_values('reach_id')].values)
+
+ target_load = channel_outflows(constituent,uci,hbn,5,[target_reach_id])
+
+
+ df = contribution.mean().to_frame().reset_index()
+ df.columns = ['TVOLNO','contribution']
+
+ df['load'] = loads.mean().values
+ df['contribution_perc'] = (contribution.div(target_load.values)*100).mean().values
+ return df[['TVOLNO','load','contribution','contribution_perc']]
+
+ #%% LEGACY Catchment Loading (ie. Direct contributions from perlnds/implnds, no losses)
+ # Q
+ # Subwatershed Weighted Mean Timeseries Output
+ # operation = 'PERLND'
+ # ts_name = 'PERO',
+ # time_code = 5
+

  def avg_subwatershed_loading(constituent,t_code,uci,hbn):
  dfs = []
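Conceptually, the new contribution functions treat each reach's outflow/inflow ratio (`channel_fate`) as a delivery factor and multiply those factors along the flow path to the target reach before scaling each catchment's local load. A toy sketch of that path-product step, with made-up ratios and reach IDs:

```python
import pandas as pd

# Toy delivery ratios (load out / load in) for three reaches, one row per year.
fate = pd.DataFrame({101: [0.90, 0.80],
                     102: [0.95, 0.90],
                     103: [1.00, 0.97]},
                    index=[2001, 2002])

# Assume the load from reach 101 passes through reaches 102 and 103 to the outlet.
path = [102, 103]
delivery = fate[path].prod(axis=1)          # fraction of 101's load that survives

local_load = pd.Series([50.0, 40.0], index=[2001, 2002])  # e.g. lb/yr from reach 101
contribution = local_load * delivery        # load from 101 arriving at the outlet
print(contribution)                         # 2001: 47.5, 2002: 34.92
```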
@@ -235,8 +411,7 @@ def avg_subwatershed_loading(constituent,t_code,uci,hbn):
  loading_rates.append(df.loc[subwatershed.index].sum().agg(agg_func)/subwatershed['AFACTR'].sum())


-
-
+
  def weighted_describe(df, value_col, weight_col):
  weighted_mean = (df[value_col] * df[weight_col]).sum() / df[weight_col].sum()
  weighted_var = ((df[value_col] - weighted_mean) ** 2 * df[weight_col]).sum() / df[weight_col].sum()
@@ -335,244 +510,7 @@ def ann_avg_watershed_loading(constituent,reach_ids,uci,hbn, by_landcover = Fals
  df = weighted_describe(df,constituent,'AFACTR')

  return df
-
-
-
- # ds = xr.
- # coords = ['time']
- # dims = ['operation','activity','opnid','time_step','time','catchment_id']
- # def _insert_col(col_name,value,df):
- # if col_name not in df.columns:
- # df.insert(0,col_name,value)
-
- # dfs = []
- # for hbn in hbns.hbns:
- # for key, df in hbn.data_frames.items():
- # operation,activity,opnid,t_code = key.split('_')
- # t_code = int(t_code)
- # opnid = int(opnid)
- # df = hbn.data_frames[key]
- # df.index.name = 'date'
- # df.index = df.index.tz_localize(None)
- # _insert_col('t_code',t_code,df)
- # _insert_col('OPNID',opnid,df)
- # _insert_col('activity',activity,df)
- # df = df.reset_index().set_index(['date','OPNID','t_code','activity'])
- # dfs.append(xr.Dataset.from_dataframe(df))
-
- # ds = xr.merge(dfs)
-
- # query = {
- # 'date': (142.41, 142.51),
- # 'y': (-32.22, -32.32),
- # 'time': ('2015-01-01', '2016-12-31'),
- # 'measurements': ['nbart_nir', 'fmask'],
- # 'output_crs': 'EPSG:3577',
- # 'resolution': (-30, 30)
- # }
-
- # dfs = []
- # for activity, ts_names in hbn.output_names().items():
- # dfs
-
- # for hbn in hbn.hbns: data_frames['PERLND_SEDMNT_201_5']
-
-
- # time_steps = [2,3,4,5]
- # operations = ['PERLND','IMPLND','RCHRES']
-
-
-
- # # def flow_loading(uci,hbn,reach_ids,time_step='yearly',weighted = True):
-
- # t_con = 'PERO'
- # t_opn = 'PERLND'
- # time_step = 'yearly'
- # activity = 'PWATER'
-
-
-
- #def total_phosphorous_loading:
- # def phosphorous_loading(uci,hbns,reach_ids,time_tep = 'yearly'):
- # catchments = get_catchments(uci,reach_ids)
- # df = total_phosphorous(uci,hbns)
-
- # subwatershed = uci.network.subwatershed(reach_id)
- # perlnds = subwatershed.loc[subwatershed['SVOL'] == 'PERLND']
- # perlnds = perlnds.set_index('SVOLNO').drop_duplicates()
- # mlno = subwatershed.loc[subwatershed['SVOL'] == 'PERLND','MLNO'].iloc[0]
- # total = total_phosphorous(uci,hbn,mlno,t_code,perlnds.index)
-
-
-
-
- #%% Landscape Yields
-
- def yield_flow(uci,hbn,constituent,reach_id):
- hbn.get_rchres_data('Q',reach_id,'cfs','yearly')/uci.network.drainage_area(reach_id)
-
-
- def yield_sediment(uci,hbn,constituent,reach_id):
- hbn.get_rchres_data('TSS',reach_id,'lb','yearly').mean()*2000/uci.network.drainage_area(reach_id)
-
- def avg_ann_yield(uci,hbn,constituent,reach_ids):
- #reach_ids = uci.network.G.nodes
-
-
- _reach_ids = [uci.network._upstream(reach) for reach in reach_ids]
- _reach_ids = list(set([num for row in _reach_ids for num in row]))
- subwatersheds = uci.network.subwatersheds().loc[_reach_ids]
- area = subwatersheds['AFACTR'].sum()
-
- if constituent == 'Q':
- units = 'acrft'
- else:
- units = 'lb'
-
- df = hbn.get_reach_constituent(constituent,reach_ids,5,unit =units).mean() # Gross
-
- return df/area
-
-
- #%% Allocations
- allocation_selector = {'Q': {'input': ['IVOL'],
- 'output': ['ROVOL']},
- 'TP': {'input': ['PTOTIN'],
- 'output': ['PTOTOUT']},
- 'TSS': {'input': ['ISEDTOT'],
- 'output': ['ROSEDTOT']},
- 'OP': {'input': ['PO4INDIS'],
- 'output': ['PO4OUTDIS']},
- 'N': {'input': ['NO3INTOT','NO2INTOT'],
- 'output': ['NO2OUTTOT','NO3OUTTOT']},
- 'TKN': {'input': [],
- 'output': ['TAMOUTTOT', 'NTOTORGOUT']}
- }

- def fate(hbn,constituent,t_code,reach_ids = None):
- if constituent == 'Q':
- fate_in = hbn.get_multiple_timeseries('RCHRES',t_code,'ROVOL',opnids=reach_ids)
- fate_out = hbn.get_multiple_timeseries('RCHRES',t_code,'IVOL',opnids=reach_ids)
- elif constituent == 'TP':
- fate_in = hbn.get_multiple_timeseries('RCHRES',t_code,'PTOTOUT',opnids = reach_ids)
- fate_out = hbn.get_multiple_timeseries('RCHRES',t_code,'PTOTIN',opnids = reach_ids)
- elif constituent == 'TSS':
- fate_in = hbn.get_multiple_timeseries('RCHRES',t_code,'ISEDTOT',opnids = reach_ids)
- fate_out = hbn.get_multiple_timeseries('RCHRES',t_code,'ROSEDTOT',opnids = reach_ids)
- return fate_out/fate_in
-
- def loading(uci,hbn,constituent,t_code = 5):
- if constituent =='TP':
- loads = total_phosphorous(uci,hbn,t_code=t_code)
- else:
- #dfs = []
- # df_implnd = hbn.get_implnd_constituent(constituent,t_code,'lb').T.reset_index().rename(columns = {'index':'OPNID'})
- # df_implnd['SVOL'] = 'IMPLND'
-
- loads = hbn.get_perlnd_constituent(constituent,t_code,'lb')
-
-
- # .T.reset_index().rename(columns = {'index':'OPNID'})
- # df_perlnd['SVOL'] = 'PERLND'
-
- # df = pd.concat([df_perlnd,df_implnd])
- # df.set_index(['SVOL','OPNID'],inplace=True)
-
- if constituent == 'TSS':
- loads = loads*2000
-
- return loads
-
- def subwatershed_loading(uci,hbn,constituent,t_code,group_landcover = True,as_load = True):
- loads = loading(uci,hbn,constituent,t_code)
-
- subwatersheds = uci.network.subwatersheds()
- perlnds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND'].reset_index()
-
- total = loads[perlnds['SVOLNO'].to_list()]
- total = total.mul(perlnds['AFACTR'].values,axis=1)
- total = total.transpose()
- total['reach_id'] = perlnds['TVOLNO'].values
- total['landcover'] = uci.table('PERLND','GEN-INFO').loc[total.index,'LSID'].to_list()
- total['area'] = perlnds['AFACTR'].to_list() #perlnds.loc[total.index,'AFACTR'].to_list()
- total = total.reset_index().set_index(['index','landcover','area','reach_id']).transpose()
- total.columns.names = ['perlnd_id','landcover','area','reach_id']
-
- if group_landcover:
- total.columns = total.columns.droplevel(['landcover','perlnd_id'])
- total = total.T.reset_index().groupby('reach_id').sum().reset_index().set_index(['reach_id','area']).T
-
- if not as_load:
- total = total.div(total.columns.get_level_values('area').values,axis=1)
-
- total.index = pd.to_datetime(total.index)
- return total
-
-
- def losses(uci,hbn,constituent, t_code = 5):
- upstream_reachs = {reach_id: uci.network.upstream(reach_id) for reach_id in uci.network.get_node_type_ids('RCHRES')}
- totout = sum([hbn.get_multiple_timeseries('RCHRES',
- t_code,
- t_cons,
- opnids = list(upstream_reachs.keys()))
- for t_cons in allocation_selector[constituent]['output']])
-
- totin = sum([hbn.get_multiple_timeseries('RCHRES',
- t_code,
- t_cons,
- opnids = list(upstream_reachs.keys()))
- for t_cons in allocation_selector[constituent]['input']])
-
-
- #totin = totout.copy().astype('Float64')
- #totin[:] = pd.NA
-
- for reach_id in totin.columns:
- reach_ids = upstream_reachs[reach_id]
- if len(reach_ids) > 0:
- totin[reach_id] = totout[reach_ids].sum(axis=1)
-
- #totin.columns = totout.columns
- return (totout-totin)/totin*100
-
- def allocations(uci,hbn,constituent,reach_id,t_code,group_landcover = True):
- p = uci.network.paths(reach_id)
- p[reach_id] = [reach_id]
- loss = losses(uci,hbn,constituent,t_code)
- loads = subwatershed_loading(uci,hbn,constituent,t_code,group_landcover = group_landcover)
- loss_factors = pd.concat([loss[v].prod(axis=1) for k,v in p.items()],axis=1)
- loss_factors.columns = list(p.keys())
- allocations = loads.mul(loss_factors[loads.columns.get_level_values('reach_id')].values)
- return allocations
-
-
- def total_phosphorous_losses(uci,hbn,t_code = 5):
- upstream_reachs = {reach_id: [reach_id] + uci.network.upstream(reach_id) for reach_id in uci.network.get_node_type_ids('RCHRES')}
- ptotout = hbn.get_multiple_timeseries('RCHRES',t_code,'PTOTOUT',opnids = list(upstream_reachs.keys()))
- ptotin = pd.concat([ptotout[reach_ids].sum(axis=1) for reach_id,reach_ids in upstream_reachs.items()],axis=1)
- ptotin.columns = list(upstream_reachs.keys())
- return 1-(ptotin-ptotout)/ptotin
-
-
- def total_phosphorous_allocations(uci,hbn,reach_id,t_code=5,group_landcover = True):
- p = uci.network.paths(reach_id)
- p[reach_id] = [reach_id]
- losses = total_phosphorous_losses(uci,hbn,t_code)
- loads = subwatershed_total_phosphorous_loading(uci,hbn,t_code=t_code,group_landcover = group_landcover)
- loss_factors = pd.concat([losses[v].prod(axis=1) for k,v in p.items()],axis=1)
- loss_factors.columns = list(p.keys())
- allocations = loads.mul(loss_factors[loads.columns.get_level_values('reach_id')].values)
- return allocations
-
- #loads[loads.index.get_level_values('reach_id').isin(loss_factors.columns)].mul(loss_factors.values,axis=1)
- #return loads[loss_factors.columns].mul(loss_factors.values,axis=1)
-
-
- def flow_allocations(uci,hbn,reach_id,t_code = 5):
- raise NotImplementedError()
-
- def total_suspended_sediment_allocations(uci,hbn,reach_id,t_code):
- raise NotImplementedError()

  #%% Water Balance
  def pevt_balance(mod,operation,opnid):
@@ -790,139 +728,9 @@ def avg_annual_precip(uci,wdm):


  #%%
- #%%% Report Tablewater_s
+ #%%% Other Reports


- def landcover_areas(uci):
- df = uci.network.operation_area('PERLND').groupby('LSID').sum()
- df['percent'] = 100*(df['AFACTR']/df['AFACTR'].sum())
- return df
-
- # def area_weighted_output(uci,hbn,ts_name,operation,time_step,opnids):
- # assert(operation in ['PERLND','IMPLND'])
- # df = hbn.get_multiple_timeseries(operation,5,ts_name,opnids = opnids).T
- # df.index.name = 'SVOLNO'
- # areas = uci.network.operation_area(operation)
- # df = df.join(areas).reset_index().set_index(['SVOLNO','AFACTR','LSID'])
- # df = df.T*df.index.get_level_values('AFACTR').values
-
- # if grouped:
- # df.columns.get_level_values('AFACTR').groupby(df.get_level_values['LSIDE'])
-
- '''
- Output for each PERLND
- - Sometimes a rate
- - Sometimes a mass or volume
-
- - Group by Landcover no without weighting
- - rate take the mean
- - mass or volum us the sum
- - Group by Landcover with weighting
- - rate convert to mass/volume sum then divide by grouped area
- - mass sum then divide by grouped area
-
-
- Output for a catchment
- - For a single catchment
- - if timeseries is a rate
- - rate is raw output
- - mass/volume is rate * area of contributing operations
- - if timeseries is a mass/volume
- - rate is mass/volume / area of contributing operations
- - mass/volume is raw output
- - No ability to aggregate by Landcover
- - For 2 or more catchments
- - if weighted
- - if timeseries is a rate
- - rate is rate*area of contributing operations summed by landcover and divided by each landcover area
- - mass/volume is rate*area summed by landcover and area
- - if timeseries is a mass/volume
- - rate is mass/volume summed by landcover and divided by landcover area
- - mass/volume is mass/volume summed by landcover
- - if not weighted
- - if timeseries is a rate
- - rate is the raw output of each catchment concatenated
- - mass/volume is rate*area of each contributing landcover and concatenated for each catchment
- - if timeseries is a mass/volume
- - rate is mass/volume / area of each contributing landcover and concatenated for each catchment
- - mass/volume is raw output of each chatchment concatenated
-
-
- '''
-
- # class Catchment:
- # def __init__(reach_id,uci,hbn = None):
- # id = reach_id
-
- # def loading_rate(constituent):
-
- # def loading(constituent):
-
- # def yield(constituent):
-
- # def load(constituent):
-
-
-
-
- '''
- The area of each landcategory in the catchment
-
- Loading rate of each landuse (lb/acre/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- Loading of from each landuse (lb/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- Yield at the catchment outlet (lb/acr/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- Load at the catchment outlet (lb/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- In channel losses of a constituent (lb/intvl)
- TSS, TP, N, TKN, BOD, OP
-
- Allocation of a constituent from catchment to downstream catchment
- TSS, TP, N, TKN, BOD, OP
-
-
-
-
- '''
-
- #reach_id = 103
- #def make_catchment(reach_id,uci,hbn):
-
-
-
-
- # class Reach:
-
-
- # class Perlnd():
- # def __init__(catchment_id,perlnd_id,area,mlno,landcover,metzone):
-
-
-
- # # class Implnd:
- # def annual_weighted_perlnd_output(uci,hbn,ts_name,t_code = 4,opnids = None):
-
- # df = hbn.get_multiple_timeseries('PERLND',5,ts_name,opnids = opnids)
- # subwatersheds = uci.network.subwatersheds().reset_index()
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND']
- # df = df[subwatersheds['SVOLNO']].T
- # df = pd.merge(df, subwatersheds, left_index = True, right_on='SVOLNO', how='inner')
- # df = df.set_index(['TVOLNO','SVOL','SVOLNO','AFACTR','LSID','MLNO']).T
-
- # def annual_weighted_output(ts_name,operation,opnids):
- # assert(operation in ['PERLND','IMPLND'])
- # subwatersheds = uci.network.subwatersheds()
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation].reset_index()
- # df = cal.model.hbns.get_multiple_timeseries('PERLND',5,'PERO',test['SVOLNO'].values).mean().reset_index()
- # df.columns = ['OPNID','value']
- # df = pd.merge(subwatersheds,df,left_on = 'SVOLNO', right_on='OPNID')
- # weighted_mean = df.groupby('TVOLNO').apply(lambda x: (x['value'] * x['AFACTR']).sum() / x['AFACTR'].sum())


  def weighted_mean(df,value_col,weight_col):
@@ -931,9 +739,9 @@ def weighted_mean(df,value_col,weight_col):
  'AFACTR' : df[weight_col].sum(),
  value_col: [weighted_mean]})

- def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None,group_by = None):
+ def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnids = None,group_by = None):
  assert (group_by in [None,'landcover','opnid'])
- df = hbn.get_multiple_timeseries(operation,5,ts_name,opnids = opnids).mean().reset_index()
+ df = hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids).mean().reset_index()
  df.columns = ['SVOLNO',ts_name]
  subwatersheds = uci.network.subwatersheds().reset_index()
  subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation]
@@ -952,7 +760,6 @@ def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None,gr
  df = df.set_index([df.index,'AFACTR'])
  return df

-

  def monthly_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None, as_rate = False, by_landcover = True, months = [1,2,3,4,5,6,7,8,9,10,11,12]):
  df = hbn.get_multiple_timeseries(operation,4,ts_name,opnids = opnids)
@@ -1097,7 +904,7 @@ def subwatershed_weighted_output(uci,hbn,reach_ids,ts_name,time_step,by_landcove



- #%% Phosphorous Loading
+ #%% Phosphorous Loading Calculations
  def subwatershed_total_phosphorous_loading(uci,hbn,reach_ids = None,t_code=5, as_load = True,group_landcover = True):
  tp_loading = total_phosphorous(uci,hbn,t_code)
  if reach_ids is None:
@@ -1131,21 +938,22 @@ def subwatershed_total_phosphorous_loading(uci,hbn,reach_ids = None,t_code=5, as
  total.index = pd.to_datetime(total.index)
  return total

- def total_phosphorous(uci,hbn,t_code):
+ def total_phosphorous(uci,hbn,t_code,operation = 'PERLND'):
  #assert(isinstance(perlnd_ids (int,list,None)))
- perlnds = uci.network.subwatersheds()
- perlnds = perlnds.loc[perlnds['SVOL'] == 'PERLND'].drop_duplicates(subset = ['SVOLNO','MLNO'])
+ opnids = uci.network.subwatersheds()
+ opnids = opnids.loc[opnids['SVOL'] == operation].drop_duplicates(subset = ['SVOLNO','MLNO'])

  totals = []
- for mlno in perlnds['MLNO'].unique():
- perlnd_ids = perlnds['SVOLNO'].loc[perlnds['MLNO'] == mlno].to_list()
- total = dissolved_orthophosphate(uci,hbn,mlno,t_code) + particulate_orthophosphate(uci,hbn,mlno, t_code) + organic_refactory_phosphorous(uci,hbn,mlno,t_code) + labile_oxygen_demand(uci,hbn,mlno,t_code)*0.007326 # Conversation factor to P
- totals.append(total[perlnd_ids])
+ for mlno in opnids['MLNO'].unique():
+ total = dissolved_orthophosphate(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate(uci,hbn,operation,mlno, t_code) + organic_refactory_phosphorous(uci,hbn,operation,mlno,t_code) + labile_oxygen_demand(uci,hbn,operation,mlno,t_code)*0.007326 # Conversation factor to P
+ if not isinstance(total,float): #TODO fix for when no data is present. Don't like this workaround.
+ totals.append(total[opnids['SVOLNO'].loc[opnids['MLNO'] == mlno].to_list()])

  total = pd.concat(totals,axis=1)
  total = total.T.groupby(total.columns).sum().T
  return total

+
  MASSLINK_SCHEME = {'dissolved_orthophosphate': {'tmemn': 'NUIF1',
  'tmemsb1': '4',
  'tmemsb2':''},
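The `total_phosphorous` calculation above sums four source terms per mass link, converting the labile oxygen demand (BOD) term to phosphorus with the 0.007326 factor. A purely illustrative composition with made-up per-interval masses:

```python
# Illustrative numbers only (lb per reporting interval); 0.007326 is the
# BOD-to-phosphorus conversion factor used in total_phosphorous above.
po4_dissolved = 0.20
po4_particulate = 0.35        # sand + silt + clay fractions combined
p_organic_refractory = 0.10
bod_labile = 20.0

tp = po4_dissolved + po4_particulate + p_organic_refractory + bod_labile * 0.007326
print(round(tp, 3))           # 0.797
```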
@@ -1169,12 +977,14 @@ MASSLINK_SCHEME = {'dissolved_orthophosphate': {'tmemn': 'NUIF1',
  'tmemsb2':''}}


- def qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):
+
+ def qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):
  masslink = uci.table('MASS-LINK',f'MASS-LINK{mlno}')
  masslink = masslink.loc[(masslink['TMEMN'] == tmemn) & (masslink['TMEMSB1'] == tmemsb1) & (masslink['TMEMSB2'] == tmemsb2)]
+ masslink.fillna({'MFACTOR': 1}, inplace=True)
  ts = 0
  for index,row in masslink.iterrows():
- hbn_name = uci.table('PERLND','QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
+ hbn_name = uci.table(operation,'QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
  hbn_name = row['SMEMN'] + hbn_name
  mfactor = row['MFACTOR']
  ts = hbn.get_multiple_timeseries(row['SVOL'],t_code,hbn_name)*mfactor + ts
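The new `fillna` call in `qualprop_transform` defaults a missing MFACTOR in the MASS-LINK table to 1, so rows without an explicit multiplier no longer propagate NaN into the accumulated timeseries. A small standalone sketch of that behaviour (hypothetical table contents):

```python
import pandas as pd

# Hypothetical MASS-LINK rows: the second row has no multiplier specified.
masslink = pd.DataFrame({'SMEMN': ['SOQUAL', 'SOQUAL'],
                         'MFACTOR': [0.5, None]})

masslink.fillna({'MFACTOR': 1}, inplace=True)
print(masslink['MFACTOR'].tolist())  # [0.5, 1.0]
```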
@@ -1183,49 +993,48 @@ def qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):



- def dissolved_orthophosphate(uci,hbn,mlno,t_code = 4):
+ def dissolved_orthophosphate(uci,hbn,operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['dissolved_orthophosphate']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['dissolved_orthophosphate']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['dissolved_orthophosphate']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
-
- def particulate_orthophosphate(uci,hbn,mlno,t_code = 4):
- ts = particulate_orthophosphate_sand(uci,hbn,mlno,t_code) + particulate_orthophosphate_silt(uci,hbn,mlno,t_code) + particulate_orthophosphate_clay(uci,hbn,mlno,t_code)
- return ts
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def particulate_orthophosphate_sand(uci,hbn, mlno,t_code = 4):
+ def particulate_orthophosphate_sand(uci,hbn,operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['particulate_orthophosphate_sand']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['particulate_orthophosphate_sand']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['particulate_orthophosphate_sand']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def particulate_orthophosphate_silt(uci,hbn, mlno,t_code = 4):
+ def particulate_orthophosphate_silt(uci,hbn,operation, mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['particulate_orthophosphate_silt']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['particulate_orthophosphate_silt']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['particulate_orthophosphate_silt']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def particulate_orthophosphate_clay(uci,hbn, mlno,t_code = 4):
+ def particulate_orthophosphate_clay(uci,hbn, operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['particulate_orthophosphate_clay']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['particulate_orthophosphate_clay']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['particulate_orthophosphate_clay']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def organic_refactory_phosphorous(uci,hbn, mlno,t_code = 4):
+ def organic_refactory_phosphorous(uci,hbn, operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['organic_refactory_phosphorous']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['organic_refactory_phosphorous']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['organic_refactory_phosphorous']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def organic_refactory_carbon(uci,hbn, mlno,t_code = 4):
+ def organic_refactory_carbon(uci,hbn, operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['organic_refactory_carbon']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['organic_refactory_carbon']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['organic_refactory_carbon']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

- def labile_oxygen_demand(uci,hbn,mlno,t_code = 4):
+ def labile_oxygen_demand(uci,hbn,operation,mlno,t_code = 4):
  tmemn = MASSLINK_SCHEME['labile_oxygen_demand']['tmemn']
  tmemsb1 = MASSLINK_SCHEME['labile_oxygen_demand']['tmemsb1']
  tmemsb2 = MASSLINK_SCHEME['labile_oxygen_demand']['tmemsb2']
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)

+ def particulate_orthophosphate(uci,hbn,operation,mlno,t_code = 4):
+ ts = particulate_orthophosphate_sand(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate_silt(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate_clay(uci,hbn,operation,mlno,t_code)
+ return ts