hspf 2.1.0__py3-none-any.whl → 2.1.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as published in their respective public registries.
hspf/reports.py CHANGED
@@ -6,7 +6,7 @@ Created on Mon Apr 11 08:26:04 2022
6
6
  """
7
7
  import numpy as np
8
8
  import pandas as pd
9
- from . import helpers
9
+ from hspf import helpers
10
10
  from pathlib import Path
11
11
 
12
12
  #timeseries_catalog = pd.read_csv(Path(__file__).parent/'TIMESERIES_CATALOG.csv')
@@ -37,22 +37,31 @@ class Reports():
37
37
  else:
38
38
  return annual_reach_water_budget(self.uci,self.hbns)
39
39
 
40
- # def annual_runoff(self):
41
- # #assert operation in ['PERLND','IMPLND']
42
- # #if operation == 'PERLND':
43
- # return annual_perlnd_runoff(self.uci,self.hbns)
44
- # #else:
45
- # # raise NotImplementedError()
40
+ def watershed_loading(self,constituent,reach_ids,upstream_reach_ids = None,by_landcover = False):
41
+ '''
42
+ Calculate the loading to channels from a watershed.
46
43
 
47
- # def monthly_runoff(self,landcover=None):
48
- # df = monthly_perlnd_runoff(self.uci,self.hbns).unstack().T
49
- # if landcover is None:
50
- # return df
51
- # else:
52
- # return df.loc[landcover]
53
-
54
- def annual_sediment_budget(self):
55
- return annual_sediment_budget(self.uci,self.hbns)
44
+ Parameters
45
+ ----------
46
+ constituent : str
47
+ Constituent to calculate loading for (e.g. 'TP', 'TSS', 'N', 'OP', 'Q', 'TKN')
48
+ reach_ids : list
49
+ List of reach IDs defining the watershed outlet
50
+ upstream_reach_ids : list, optional
51
+ List of reach IDs defining the upstream boundary of the watershed. The default is None.
52
+ by_landcover : bool, optional
53
+ If True, returns loading by landcover type. The default is False.
54
+ '''
55
+ return get_watershed_loading(self.uci,self.hbns,reach_ids,constituent,upstream_reach_ids,by_landcover)
56
+
57
+ def catchment_loading(self,constituent,by_landcover = False):
58
+ return get_catchment_loading(self.uci,self.hbns,constituent,by_landcover)
59
+
60
+ def contributions(self,constituent,target_reach_id):
61
+ return total_contributions(constituent,self.uci,self.hbns,target_reach_id)
62
+
63
+ def landcover_contributions(self,constituent,target_reach_id,landcover = None):
64
+ return catchment_contributions(self.uci,self.hbns,constituent,target_reach_id,landcover)
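For orientation, a minimal usage sketch of the new loading and contribution helpers. The construction of the Reports instance is not shown in this diff, and the reach IDs, constituents, and landcover name below are invented:

# Assumes `reports` is a Reports instance already wired to a parsed UCI and HBN output;
# how it is constructed is not part of this diff. All IDs below are invented.
tp_by_landcover = reports.watershed_loading('TP', reach_ids=[101, 102, 103], by_landcover=True)
tss_per_catchment = reports.catchment_loading('TSS')
shares_at_outlet = reports.contributions('TP', target_reach_id=103)
urban_shares = reports.landcover_contributions('TP', target_reach_id=103, landcover='URBAN')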
56
65
 
57
66
  def ann_avg_subwatershed_loading(self,constituent):
58
67
  return ann_avg_subwatershed_loading(constituent,self.uci, self.hbns)
@@ -68,33 +77,17 @@ class Reports():
68
77
  total['share'] = total['volume_percent']/total['area_percent']
69
78
  return total
70
79
 
71
- # def monthly_avg_subwatershed_loading(self,constituent,month):
72
- # return monthly_avg_subwatershed_loading(constituent,month,self.uci, self.hbns)
73
-
74
- # def monthly_avg_watershed_loading(self,constituent,reach_ids,month,by_landcover = True):
75
- # return monthly_avg_watershed_loading(constituent,reach_ids,month,self.uci, self.hbns,by_landcover = by_landcover)
76
-
77
-
78
- def ann_avg_yield(self,constituent,reach_ids):
79
- df= avg_ann_yield(self.uci,self.hbns,constituent,reach_ids)
80
+ def ann_avg_yield(self,constituent,reach_ids,upstream_reach_ids = None):
81
+ df= avg_ann_yield(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids)
80
82
  return df
81
83
 
82
84
  def annual_precip(self):
83
85
  return avg_annual_precip(self.uci,self.wdms)
84
86
 
85
- # def water_balance(self,reach_ids = None):
86
- # if reach_ids is None:
87
- # reach_ids = self.uci.network.outlets()
88
- # return water_balance(self.uci,self.hbns,self.wdms,reach_ids)
89
-
90
87
  def simulated_et(self):
91
88
  return simulated_et(self.uci,self.hbns)
92
89
 
93
- # def inflows(self):
94
- # return inflows(self.uci,self.wdms)
95
-
96
-
97
-
90
+
98
91
 
99
92
  #%% Channel Reports
100
93
  def scour(hbn,uci,start_year = '1996',end_year = '2030'):
@@ -175,37 +168,218 @@ def get_catchments(uci,reach_ids):
175
168
  return landcover
176
169
 
177
170
 
178
- #%% Catchment Loading (ie. Direct contributions from perlnds/implnds, no losses)
179
- # Q
180
- # Subwatershed Weighted Mean Timeseries Output
181
- # operation = 'PERLND'
182
- # ts_name = 'PERO',
183
- # time_code = 5
184
171
 
172
+ #%% Landscape Yields
173
+
174
+ def yield_flow(uci,hbn,constituent,reach_id):
175
+ return hbn.get_rchres_data('Q',reach_id,'cfs','yearly')/uci.network.drainage_area(reach_id)
176
+
177
+
178
+ def yield_sediment(uci,hbn,constituent,reach_id):
179
+ return hbn.get_rchres_data('TSS',reach_id,'lb','yearly').mean()*2000/uci.network.drainage_area(reach_id)
180
+
181
+ def avg_ann_yield(uci,hbn,constituent,reach_ids,upstream_reach_ids = None):
182
+ #reach_ids = uci.network.G.nodes
183
+
184
+ reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
185
+ area = uci.network.drainage_area(reach_ids,upstream_reach_ids)
186
+
187
+ if constituent == 'Q':
188
+ units = 'acrft'
189
+ else:
190
+ units = 'lb'
191
+
192
+ df = hbn.get_reach_constituent(constituent,reach_ids,5,unit =units).mean() # Gross
193
+
194
+ return df/area
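The yield computed here is simply the mean annual load divided by the contributing drainage area; a toy numeric sketch of that arithmetic (all values invented):

import pandas as pd

annual_load_lb = pd.Series([1200.0, 950.0, 1100.0])   # annual loads at the outlet, lb/yr (invented)
drainage_area_ac = 500.0                               # contributing drainage area, acres (invented)
avg_yield_lb_per_ac = annual_load_lb.mean() / drainage_area_ac   # ~2.17 lb/acre/yr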
195
+
196
+ #%% Catchment and Watershed Loading
197
+
198
+ def landcover_areas(uci):
199
+ df = uci.network.operation_area('PERLND').groupby('LSID').sum()
200
+ df['percent'] = 100*(df['AFACTR']/df['AFACTR'].sum())
201
+ return df
185
202
 
186
- LOADING_MAP = {'Q' : [{'t_opn':'PERLND',
187
- 't_con': 'PERO',
188
- 't_code': 'yearly',
189
- 'activity': 'PWATER'}],
190
- 'TSS': [{'t_opn':'PERLND',
191
- 't_con': 'SOSED',
192
- 't_code': 'yearly',
193
- 'activity': 'SEDMNT'},
194
- {'t_opn':'IMPLND',
195
- 't_con': 'SOSED',
196
- 't_code': 'yearly',
197
- 'activity': 'SEDMNT'}]}
203
+ def catchment_areas(uci):
204
+ df = uci.network.subwatersheds().reset_index()
205
+ df = df.groupby('TVOLNO')['AFACTR'].sum().reset_index()
206
+ df.rename(columns = {'AFACTR':'catchment_area'},inplace = True)
207
+ return df
198
208
 
209
+ def get_constituent_loading(uci,hbn,constituent,time_step = 5):
199
210
 
200
- # def annual_average_subwatershed_loading(constituent,uci,hbn,reach_ids):
201
- # '''
202
211
 
203
- # For each subwatershed the annual average loading rate
212
+ if constituent == 'TP':
213
+ perlnds = total_phosphorous(uci,hbn,t_code=time_step,operation = 'PERLND').reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
214
+ implnds = total_phosphorous(uci,hbn,t_code=time_step,operation = 'IMPLND').reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
215
+ else:
216
+ perlnds = hbn.get_perlnd_constituent(constituent,time_step = time_step).reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
217
+ implnds = hbn.get_implnd_constituent(constituent,time_step = time_step).reset_index().melt(id_vars = ['index'],var_name = 'OPNID')
204
218
 
205
- # For each subwatershed the average loading rate for a specific month
219
+ perlnds['OPERATION'] = 'PERLND'
220
+ implnds['OPERATION'] = 'IMPLND'
221
+
222
+ df = pd.concat([perlnds,implnds],axis=0)
223
+
224
+ #df = df.groupby(['OPNID','OPERATION'])['value'].mean().reset_index()
225
+
206
226
 
227
+ # units = 'lb/acre'
228
+ if constituent == 'Q':
229
+ df.loc[:, 'value'] = df['value']/12 # convert runoff depth from inches to feet so value * AFACTR (acres) gives acre-ft
230
+ # units = 'ft/acre'
207
231
 
208
- # '''
232
+ # df['unit'] = units
233
+ # df.rename(columns = {'index':'datetime','value': 'loading_rate'},inplace = True)
234
+ # df['constituent'] = constituent
235
+ # df['time_step'] = time_step
236
+ # df['year'] = pd.DatetimeIndex(df['datetime']).year
237
+ # df['month'] = pd.DatetimeIndex(df['datetime']).month
238
+
239
+
240
+ subwatersheds = uci.network.subwatersheds().reset_index()
241
+ subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
242
+ areas = catchment_areas(uci)
243
+
244
+ df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='right')
245
+ df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
246
+ df['load'] = df['value']*df['AFACTR']
247
+ df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
248
+ df['constituent'] = constituent
249
+ return df[['index','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
250
+
251
+
252
+ def get_catchment_loading(uci,hbn,constituent,by_landcover = False):
253
+ df = get_constituent_loading(uci,hbn,constituent)
254
+ if not by_landcover:
255
+ df = df.groupby(['TVOLNO','constituent'])[['landcover_area','load']].sum().reset_index()
256
+ df['loading_rate'] = df['load']/df['landcover_area']
257
+ return df
258
+
259
+ def get_watershed_loading(uci,hbn,reach_ids,constituent,upstream_reach_ids = None,by_landcover = False):
260
+ reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
261
+
262
+ df = get_constituent_loading(uci,hbn,constituent)
263
+ df = df.loc[df['TVOLNO'].isin(reach_ids)]
264
+
265
+ if by_landcover:
266
+ df = df.groupby(['landcover','constituent'])[['landcover_area','load']].sum().reset_index()
267
+ df['loading_rate'] = df['load']/df['landcover_area']
268
+ else:
269
+ df = df.groupby(['constituent'])[['landcover_area','load']].sum().reset_index()
270
+ df['loading_rate'] = df['load']/df['landcover_area']
271
+
272
+ return df
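The aggregation in get_catchment_loading and get_watershed_loading is rate-times-area followed by a groupby; a self-contained sketch of the same pattern on toy data (column names mirror the diff, the numbers are invented, and the constituent column is omitted for brevity):

import pandas as pd

df = pd.DataFrame({
    'TVOLNO': [101, 101, 102],
    'landcover': ['FOREST', 'URBAN', 'FOREST'],
    'landcover_area': [300.0, 50.0, 200.0],   # acres
    'loading_rate': [0.5, 4.0, 0.6],          # lb/acre
})
df['load'] = df['loading_rate'] * df['landcover_area']

# Collapse to one row per catchment, as get_catchment_loading does when by_landcover=False.
per_catchment = df.groupby('TVOLNO')[['landcover_area', 'load']].sum().reset_index()
per_catchment['loading_rate'] = per_catchment['load'] / per_catchment['landcover_area']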
273
+
274
+
275
+ #%% Contributions
276
+ allocation_selector = {'Q': {'input': ['IVOL'],
277
+ 'output': ['ROVOL']},
278
+ 'TP': {'input': ['PTOTIN'],
279
+ 'output': ['PTOTOUT']},
280
+ 'TSS': {'input': ['ISEDTOT'],
281
+ 'output': ['ROSEDTOT']},
282
+ 'OP': {'input': ['PO4INDIS'],
283
+ 'output': ['PO4OUTDIS']},
284
+ 'N': {'input': ['NO3INTOT','NO2INTOT'],
285
+ 'output': ['NO2OUTTOT','NO3OUTTOT']},
286
+ 'TKN': {'input': ['TAMINTOT','NTOTORGIN'],
287
+ 'output': ['TAMOUTTOT', 'NTOTORGOUT']}
288
+ }
289
+
290
+ def channel_inflows(constituent,uci,hbn,t_code,reach_ids = None):
291
+ load_in = sum([hbn.get_multiple_timeseries('RCHRES',
292
+ t_code,
293
+ t_cons,
294
+ opnids = reach_ids)
295
+ for t_cons in allocation_selector[constituent]['input']])
296
+
297
+ if constituent == 'TSS':
298
+ load_in = load_in*2000
299
+
300
+ return load_in
301
+
302
+ def channel_outflows(constituent,uci,hbn,t_code,reach_ids = None):
303
+ load_out = sum([hbn.get_multiple_timeseries('RCHRES',
304
+ t_code,
305
+ t_cons,
306
+ opnids = reach_ids)
307
+ for t_cons in allocation_selector[constituent]['output']])
308
+ if constituent == 'TSS':
309
+ load_out = load_out*2000
310
+ return load_out
311
+
312
+ def channel_fate(constituent,uci,hbn,t_code,reach_ids = None):
313
+ load_in = channel_inflows(constituent,uci,hbn,t_code,reach_ids)
314
+ load_out = channel_outflows(constituent,uci,hbn,t_code,reach_ids)
315
+ return load_out/load_in
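catchment_contributions and total_contributions below scale each catchment's local load by the product of the per-reach pass-through ratios (channel_fate) along its flow path to the target reach; a toy sketch of that composition (all numbers invented):

fate = {101: 0.9, 102: 0.8}                            # invented pass-through ratios along the path 101 -> 102 -> target
delivered_fraction = fate[101] * fate[102]             # 0.72 of the local load survives channel routing
local_load_lb = 500.0                                  # invented local loading for the catchment of reach 101
contribution_lb = local_load_lb * delivered_fraction   # 360 lb reaches the target reach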
316
+
317
+
318
+ def local_loading(constituent,uci,hbn,t_code,reach_ids = None):
319
+ load_in = channel_inflows(constituent,uci,hbn,t_code,reach_ids)
320
+ load_out = channel_outflows(constituent,uci,hbn,t_code,reach_ids)
321
+ df = pd.DataFrame({reach_id: load_in[reach_id] - load_out[uci.network.upstream(reach_id)].sum(axis=1) for reach_id in load_in.columns})
322
+ return df
323
+
324
+
325
+
326
+ def catchment_contributions(uci,hbn,constituent,target_reach_id, landcover = None):
327
+ p = uci.network.paths(target_reach_id)
328
+ p[target_reach_id] = [target_reach_id]
329
+ fate = channel_fate(constituent,uci,hbn,5)
330
+ fate_factors = pd.concat([fate[v].prod(axis=1) for k,v in p.items()],axis=1)
331
+ fate_factors.columns = list(p.keys())
332
+
333
+ fate_factors = fate_factors.reset_index().melt(id_vars = 'index')
334
+
335
+ df = get_catchment_loading(uci,hbn,constituent,by_landcover = True)
336
+ df = pd.merge(df,fate_factors,left_on = ['TVOLNO','index'],right_on = ['variable','index'])
337
+
338
+ df['contribution'] = df['value']*df['load']
339
+
340
+ target_load = channel_outflows(constituent,uci,hbn,5,[target_reach_id])
341
+
342
+ df = pd.merge(df,target_load.reset_index().melt(id_vars='index',var_name = 'target_reach',value_name = 'target_load'),left_on='index',right_on='index')
343
+ df['contribution_perc'] = df['contribution']/(df['target_load'])*100
344
+
345
+ df = df.groupby(['TVOLNO','landcover','landcover_area'])[['load','contribution','contribution_perc','target_load']].mean().reset_index()
346
+
347
+ if landcover is not None:
348
+ df = df.loc[df['landcover'] == landcover]
349
+
350
+ else:
351
+ df = df.groupby(['TVOLNO',])[['landcover_area','load','contribution','contribution_perc']].sum().reset_index()
352
+
353
+ return df
354
+
355
+ def total_contributions(constituent,uci,hbn,target_reach_id, as_percent = True):
356
+ p = uci.network.paths(target_reach_id)
357
+ p[target_reach_id] = [target_reach_id]
358
+ fate = channel_fate(constituent,uci,hbn,5)
359
+ loads = local_loading(constituent,uci,hbn,5)
360
+ fate_factors = pd.concat([fate[v].prod(axis=1) for k,v in p.items()],axis=1)
361
+ fate_factors.columns = list(p.keys())
362
+ loads = loads[loads.columns.intersection(fate_factors.columns)]
363
+ contribution = loads[fate_factors.columns].mul(fate_factors.values)
364
+ #allocations = loads.mul(loss_factors[loads.columns.get_level_values('reach_id')].values)
365
+
366
+ target_load = channel_outflows(constituent,uci,hbn,5,[target_reach_id])
367
+
368
+
369
+ df = contribution.mean().to_frame().reset_index()
370
+ df.columns = ['TVOLNO','contribution']
371
+
372
+ df['load'] = loads.mean().values
373
+ df['contribution_perc'] = (contribution.div(target_load.values)*100).mean().values
374
+ return df[['TVOLNO','load','contribution','contribution_perc']]
375
+
376
+ #%% LEGACY Catchment Loading (ie. Direct contributions from perlnds/implnds, no losses)
377
+ # Q
378
+ # Subwatershed Weighted Mean Timeseries Output
379
+ # operation = 'PERLND'
380
+ # ts_name = 'PERO',
381
+ # time_code = 5
382
+
209
383
 
210
384
  def avg_subwatershed_loading(constituent,t_code,uci,hbn):
211
385
  dfs = []
@@ -235,8 +409,7 @@ def avg_subwatershed_loading(constituent,t_code,uci,hbn):
235
409
  loading_rates.append(df.loc[subwatershed.index].sum().agg(agg_func)/subwatershed['AFACTR'].sum())
236
410
 
237
411
 
238
-
239
-
412
+
240
413
  def weighted_describe(df, value_col, weight_col):
241
414
  weighted_mean = (df[value_col] * df[weight_col]).sum() / df[weight_col].sum()
242
415
  weighted_var = ((df[value_col] - weighted_mean) ** 2 * df[weight_col]).sum() / df[weight_col].sum()
@@ -335,244 +508,7 @@ def ann_avg_watershed_loading(constituent,reach_ids,uci,hbn, by_landcover = Fals
335
508
  df = weighted_describe(df,constituent,'AFACTR')
336
509
 
337
510
  return df
338
-
339
-
340
511
 
341
- # ds = xr.
342
- # coords = ['time']
343
- # dims = ['operation','activity','opnid','time_step','time','catchment_id']
344
- # def _insert_col(col_name,value,df):
345
- # if col_name not in df.columns:
346
- # df.insert(0,col_name,value)
347
-
348
- # dfs = []
349
- # for hbn in hbns.hbns:
350
- # for key, df in hbn.data_frames.items():
351
- # operation,activity,opnid,t_code = key.split('_')
352
- # t_code = int(t_code)
353
- # opnid = int(opnid)
354
- # df = hbn.data_frames[key]
355
- # df.index.name = 'date'
356
- # df.index = df.index.tz_localize(None)
357
- # _insert_col('t_code',t_code,df)
358
- # _insert_col('OPNID',opnid,df)
359
- # _insert_col('activity',activity,df)
360
- # df = df.reset_index().set_index(['date','OPNID','t_code','activity'])
361
- # dfs.append(xr.Dataset.from_dataframe(df))
362
-
363
- # ds = xr.merge(dfs)
364
-
365
- # query = {
366
- # 'date': (142.41, 142.51),
367
- # 'y': (-32.22, -32.32),
368
- # 'time': ('2015-01-01', '2016-12-31'),
369
- # 'measurements': ['nbart_nir', 'fmask'],
370
- # 'output_crs': 'EPSG:3577',
371
- # 'resolution': (-30, 30)
372
- # }
373
-
374
- # dfs = []
375
- # for activity, ts_names in hbn.output_names().items():
376
- # dfs
377
-
378
- # for hbn in hbn.hbns: data_frames['PERLND_SEDMNT_201_5']
379
-
380
-
381
- # time_steps = [2,3,4,5]
382
- # operations = ['PERLND','IMPLND','RCHRES']
383
-
384
-
385
-
386
- # # def flow_loading(uci,hbn,reach_ids,time_step='yearly',weighted = True):
387
-
388
- # t_con = 'PERO'
389
- # t_opn = 'PERLND'
390
- # time_step = 'yearly'
391
- # activity = 'PWATER'
392
-
393
-
394
-
395
- #def total_phosphorous_loading:
396
- # def phosphorous_loading(uci,hbns,reach_ids,time_tep = 'yearly'):
397
- # catchments = get_catchments(uci,reach_ids)
398
- # df = total_phosphorous(uci,hbns)
399
-
400
- # subwatershed = uci.network.subwatershed(reach_id)
401
- # perlnds = subwatershed.loc[subwatershed['SVOL'] == 'PERLND']
402
- # perlnds = perlnds.set_index('SVOLNO').drop_duplicates()
403
- # mlno = subwatershed.loc[subwatershed['SVOL'] == 'PERLND','MLNO'].iloc[0]
404
- # total = total_phosphorous(uci,hbn,mlno,t_code,perlnds.index)
405
-
406
-
407
-
408
-
409
- #%% Landscape Yields
410
-
411
- def yield_flow(uci,hbn,constituent,reach_id):
412
- hbn.get_rchres_data('Q',reach_id,'cfs','yearly')/uci.network.drainage_area(reach_id)
413
-
414
-
415
- def yield_sediment(uci,hbn,constituent,reach_id):
416
- hbn.get_rchres_data('TSS',reach_id,'lb','yearly').mean()*2000/uci.network.drainage_area(reach_id)
417
-
418
- def avg_ann_yield(uci,hbn,constituent,reach_ids):
419
- #reach_ids = uci.network.G.nodes
420
-
421
-
422
- _reach_ids = [uci.network._upstream(reach) for reach in reach_ids]
423
- _reach_ids = list(set([num for row in _reach_ids for num in row]))
424
- subwatersheds = uci.network.subwatersheds().loc[_reach_ids]
425
- area = subwatersheds['AFACTR'].sum()
426
-
427
- if constituent == 'Q':
428
- units = 'acrft'
429
- else:
430
- units = 'lb'
431
-
432
- df = hbn.get_reach_constituent(constituent,reach_ids,5,unit =units).mean() # Gross
433
-
434
- return df/area
435
-
436
-
437
- #%% Allocations
438
- allocation_selector = {'Q': {'input': ['IVOL'],
439
- 'output': ['ROVOL']},
440
- 'TP': {'input': ['PTOTIN'],
441
- 'output': ['PTOTOUT']},
442
- 'TSS': {'input': ['ISEDTOT'],
443
- 'output': ['ROSEDTOT']},
444
- 'OP': {'input': ['PO4INDIS'],
445
- 'output': ['PO4OUTDIS']},
446
- 'N': {'input': ['NO3INTOT','NO2INTOT'],
447
- 'output': ['NO2OUTTOT','NO3OUTTOT']},
448
- 'TKN': {'input': [],
449
- 'output': ['TAMOUTTOT', 'NTOTORGOUT']}
450
- }
451
-
452
- def fate(hbn,constituent,t_code,reach_ids = None):
453
- if constituent == 'Q':
454
- fate_in = hbn.get_multiple_timeseries('RCHRES',t_code,'ROVOL',opnids=reach_ids)
455
- fate_out = hbn.get_multiple_timeseries('RCHRES',t_code,'IVOL',opnids=reach_ids)
456
- elif constituent == 'TP':
457
- fate_in = hbn.get_multiple_timeseries('RCHRES',t_code,'PTOTOUT',opnids = reach_ids)
458
- fate_out = hbn.get_multiple_timeseries('RCHRES',t_code,'PTOTIN',opnids = reach_ids)
459
- elif constituent == 'TSS':
460
- fate_in = hbn.get_multiple_timeseries('RCHRES',t_code,'ISEDTOT',opnids = reach_ids)
461
- fate_out = hbn.get_multiple_timeseries('RCHRES',t_code,'ROSEDTOT',opnids = reach_ids)
462
- return fate_out/fate_in
463
-
464
- def loading(uci,hbn,constituent,t_code = 5):
465
- if constituent =='TP':
466
- loads = total_phosphorous(uci,hbn,t_code=t_code)
467
- else:
468
- #dfs = []
469
- # df_implnd = hbn.get_implnd_constituent(constituent,t_code,'lb').T.reset_index().rename(columns = {'index':'OPNID'})
470
- # df_implnd['SVOL'] = 'IMPLND'
471
-
472
- loads = hbn.get_perlnd_constituent(constituent,t_code,'lb')
473
-
474
-
475
- # .T.reset_index().rename(columns = {'index':'OPNID'})
476
- # df_perlnd['SVOL'] = 'PERLND'
477
-
478
- # df = pd.concat([df_perlnd,df_implnd])
479
- # df.set_index(['SVOL','OPNID'],inplace=True)
480
-
481
- if constituent == 'TSS':
482
- loads = loads*2000
483
-
484
- return loads
485
-
486
- def subwatershed_loading(uci,hbn,constituent,t_code,group_landcover = True,as_load = True):
487
- loads = loading(uci,hbn,constituent,t_code)
488
-
489
- subwatersheds = uci.network.subwatersheds()
490
- perlnds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND'].reset_index()
491
-
492
- total = loads[perlnds['SVOLNO'].to_list()]
493
- total = total.mul(perlnds['AFACTR'].values,axis=1)
494
- total = total.transpose()
495
- total['reach_id'] = perlnds['TVOLNO'].values
496
- total['landcover'] = uci.table('PERLND','GEN-INFO').loc[total.index,'LSID'].to_list()
497
- total['area'] = perlnds['AFACTR'].to_list() #perlnds.loc[total.index,'AFACTR'].to_list()
498
- total = total.reset_index().set_index(['index','landcover','area','reach_id']).transpose()
499
- total.columns.names = ['perlnd_id','landcover','area','reach_id']
500
-
501
- if group_landcover:
502
- total.columns = total.columns.droplevel(['landcover','perlnd_id'])
503
- total = total.T.reset_index().groupby('reach_id').sum().reset_index().set_index(['reach_id','area']).T
504
-
505
- if not as_load:
506
- total = total.div(total.columns.get_level_values('area').values,axis=1)
507
-
508
- total.index = pd.to_datetime(total.index)
509
- return total
510
-
511
-
512
- def losses(uci,hbn,constituent, t_code = 5):
513
- upstream_reachs = {reach_id: uci.network.upstream(reach_id) for reach_id in uci.network.get_node_type_ids('RCHRES')}
514
- totout = sum([hbn.get_multiple_timeseries('RCHRES',
515
- t_code,
516
- t_cons,
517
- opnids = list(upstream_reachs.keys()))
518
- for t_cons in allocation_selector[constituent]['output']])
519
-
520
- totin = sum([hbn.get_multiple_timeseries('RCHRES',
521
- t_code,
522
- t_cons,
523
- opnids = list(upstream_reachs.keys()))
524
- for t_cons in allocation_selector[constituent]['input']])
525
-
526
-
527
- #totin = totout.copy().astype('Float64')
528
- #totin[:] = pd.NA
529
-
530
- for reach_id in totin.columns:
531
- reach_ids = upstream_reachs[reach_id]
532
- if len(reach_ids) > 0:
533
- totin[reach_id] = totout[reach_ids].sum(axis=1)
534
-
535
- #totin.columns = totout.columns
536
- return (totout-totin)/totin*100
537
-
538
- def allocations(uci,hbn,constituent,reach_id,t_code,group_landcover = True):
539
- p = uci.network.paths(reach_id)
540
- p[reach_id] = [reach_id]
541
- loss = losses(uci,hbn,constituent,t_code)
542
- loads = subwatershed_loading(uci,hbn,constituent,t_code,group_landcover = group_landcover)
543
- loss_factors = pd.concat([loss[v].prod(axis=1) for k,v in p.items()],axis=1)
544
- loss_factors.columns = list(p.keys())
545
- allocations = loads.mul(loss_factors[loads.columns.get_level_values('reach_id')].values)
546
- return allocations
547
-
548
-
549
- def total_phosphorous_losses(uci,hbn,t_code = 5):
550
- upstream_reachs = {reach_id: [reach_id] + uci.network.upstream(reach_id) for reach_id in uci.network.get_node_type_ids('RCHRES')}
551
- ptotout = hbn.get_multiple_timeseries('RCHRES',t_code,'PTOTOUT',opnids = list(upstream_reachs.keys()))
552
- ptotin = pd.concat([ptotout[reach_ids].sum(axis=1) for reach_id,reach_ids in upstream_reachs.items()],axis=1)
553
- ptotin.columns = list(upstream_reachs.keys())
554
- return 1-(ptotin-ptotout)/ptotin
555
-
556
-
557
- def total_phosphorous_allocations(uci,hbn,reach_id,t_code=5,group_landcover = True):
558
- p = uci.network.paths(reach_id)
559
- p[reach_id] = [reach_id]
560
- losses = total_phosphorous_losses(uci,hbn,t_code)
561
- loads = subwatershed_total_phosphorous_loading(uci,hbn,t_code=t_code,group_landcover = group_landcover)
562
- loss_factors = pd.concat([losses[v].prod(axis=1) for k,v in p.items()],axis=1)
563
- loss_factors.columns = list(p.keys())
564
- allocations = loads.mul(loss_factors[loads.columns.get_level_values('reach_id')].values)
565
- return allocations
566
-
567
- #loads[loads.index.get_level_values('reach_id').isin(loss_factors.columns)].mul(loss_factors.values,axis=1)
568
- #return loads[loss_factors.columns].mul(loss_factors.values,axis=1)
569
-
570
-
571
- def flow_allocations(uci,hbn,reach_id,t_code = 5):
572
- raise NotImplementedError()
573
-
574
- def total_suspended_sediment_allocations(uci,hbn,reach_id,t_code):
575
- raise NotImplementedError()
576
512
 
577
513
  #%% Water Balance
578
514
  def pevt_balance(mod,operation,opnid):
@@ -790,139 +726,9 @@ def avg_annual_precip(uci,wdm):
790
726
 
791
727
 
792
728
  #%%
793
- #%%% Report Tablewater_s
729
+ #%%% Other Reports
794
730
 
795
731
 
796
- def landcover_areas(uci):
797
- df = uci.network.operation_area('PERLND').groupby('LSID').sum()
798
- df['percent'] = 100*(df['AFACTR']/df['AFACTR'].sum())
799
- return df
800
-
801
- # def area_weighted_output(uci,hbn,ts_name,operation,time_step,opnids):
802
- # assert(operation in ['PERLND','IMPLND'])
803
- # df = hbn.get_multiple_timeseries(operation,5,ts_name,opnids = opnids).T
804
- # df.index.name = 'SVOLNO'
805
- # areas = uci.network.operation_area(operation)
806
- # df = df.join(areas).reset_index().set_index(['SVOLNO','AFACTR','LSID'])
807
- # df = df.T*df.index.get_level_values('AFACTR').values
808
-
809
- # if grouped:
810
- # df.columns.get_level_values('AFACTR').groupby(df.get_level_values['LSIDE'])
811
-
812
- '''
813
- Output for each PERLND
814
- - Sometimes a rate
815
- - Sometimes a mass or volume
816
-
817
- - Group by Landcover no without weighting
818
- - rate take the mean
819
- - mass or volum us the sum
820
- - Group by Landcover with weighting
821
- - rate convert to mass/volume sum then divide by grouped area
822
- - mass sum then divide by grouped area
823
-
824
-
825
- Output for a catchment
826
- - For a single catchment
827
- - if timeseries is a rate
828
- - rate is raw output
829
- - mass/volume is rate * area of contributing operations
830
- - if timeseries is a mass/volume
831
- - rate is mass/volume / area of contributing operations
832
- - mass/volume is raw output
833
- - No ability to aggregate by Landcover
834
- - For 2 or more catchments
835
- - if weighted
836
- - if timeseries is a rate
837
- - rate is rate*area of contributing operations summed by landcover and divided by each landcover area
838
- - mass/volume is rate*area summed by landcover and area
839
- - if timeseries is a mass/volume
840
- - rate is mass/volume summed by landcover and divided by landcover area
841
- - mass/volume is mass/volume summed by landcover
842
- - if not weighted
843
- - if timeseries is a rate
844
- - rate is the raw output of each catchment concatenated
845
- - mass/volume is rate*area of each contributing landcover and concatenated for each catchment
846
- - if timeseries is a mass/volume
847
- - rate is mass/volume / area of each contributing landcover and concatenated for each catchment
848
- - mass/volume is raw output of each chatchment concatenated
849
-
850
-
851
- '''
852
-
853
- # class Catchment:
854
- # def __init__(reach_id,uci,hbn = None):
855
- # id = reach_id
856
-
857
- # def loading_rate(constituent):
858
-
859
- # def loading(constituent):
860
-
861
- # def yield(constituent):
862
-
863
- # def load(constituent):
864
-
865
-
866
-
867
-
868
- '''
869
- The area of each landcategory in the catchment
870
-
871
- Loading rate of each landuse (lb/acre/intvl)
872
- TSS, TP, N, TKN, BOD, OP
873
-
874
- Loading of from each landuse (lb/intvl)
875
- TSS, TP, N, TKN, BOD, OP
876
-
877
- Yield at the catchment outlet (lb/acr/intvl)
878
- TSS, TP, N, TKN, BOD, OP
879
-
880
- Load at the catchment outlet (lb/intvl)
881
- TSS, TP, N, TKN, BOD, OP
882
-
883
- In channel losses of a constituent (lb/intvl)
884
- TSS, TP, N, TKN, BOD, OP
885
-
886
- Allocation of a constituent from catchment to downstream catchment
887
- TSS, TP, N, TKN, BOD, OP
888
-
889
-
890
-
891
-
892
- '''
893
-
894
- #reach_id = 103
895
- #def make_catchment(reach_id,uci,hbn):
896
-
897
-
898
-
899
-
900
- # class Reach:
901
-
902
-
903
- # class Perlnd():
904
- # def __init__(catchment_id,perlnd_id,area,mlno,landcover,metzone):
905
-
906
-
907
-
908
- # # class Implnd:
909
- # def annual_weighted_perlnd_output(uci,hbn,ts_name,t_code = 4,opnids = None):
910
-
911
- # df = hbn.get_multiple_timeseries('PERLND',5,ts_name,opnids = opnids)
912
- # subwatersheds = uci.network.subwatersheds().reset_index()
913
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND']
914
- # df = df[subwatersheds['SVOLNO']].T
915
- # df = pd.merge(df, subwatersheds, left_index = True, right_on='SVOLNO', how='inner')
916
- # df = df.set_index(['TVOLNO','SVOL','SVOLNO','AFACTR','LSID','MLNO']).T
917
-
918
- # def annual_weighted_output(ts_name,operation,opnids):
919
- # assert(operation in ['PERLND','IMPLND'])
920
- # subwatersheds = uci.network.subwatersheds()
921
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation].reset_index()
922
- # df = cal.model.hbns.get_multiple_timeseries('PERLND',5,'PERO',test['SVOLNO'].values).mean().reset_index()
923
- # df.columns = ['OPNID','value']
924
- # df = pd.merge(subwatersheds,df,left_on = 'SVOLNO', right_on='OPNID')
925
- # weighted_mean = df.groupby('TVOLNO').apply(lambda x: (x['value'] * x['AFACTR']).sum() / x['AFACTR'].sum())
926
732
 
927
733
 
928
734
  def weighted_mean(df,value_col,weight_col):
@@ -931,9 +737,9 @@ def weighted_mean(df,value_col,weight_col):
931
737
  'AFACTR' : df[weight_col].sum(),
932
738
  value_col: [weighted_mean]})
933
739
 
934
- def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None,group_by = None):
740
+ def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnids = None,group_by = None):
935
741
  assert (group_by in [None,'landcover','opnid'])
936
- df = hbn.get_multiple_timeseries(operation,5,ts_name,opnids = opnids).mean().reset_index()
742
+ df = hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids).mean().reset_index()
937
743
  df.columns = ['SVOLNO',ts_name]
938
744
  subwatersheds = uci.network.subwatersheds().reset_index()
939
745
  subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation]
@@ -952,7 +758,6 @@ def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None,gr
952
758
  df = df.set_index([df.index,'AFACTR'])
953
759
  return df
954
760
 
955
-
956
761
 
957
762
  def monthly_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None, as_rate = False, by_landcover = True, months = [1,2,3,4,5,6,7,8,9,10,11,12]):
958
763
  df = hbn.get_multiple_timeseries(operation,4,ts_name,opnids = opnids)
@@ -1097,7 +902,7 @@ def subwatershed_weighted_output(uci,hbn,reach_ids,ts_name,time_step,by_landcove
1097
902
 
1098
903
 
1099
904
 
1100
- #%% Phosphorous Loading
905
+ #%% Phosphorous Loading Calculations
1101
906
  def subwatershed_total_phosphorous_loading(uci,hbn,reach_ids = None,t_code=5, as_load = True,group_landcover = True):
1102
907
  tp_loading = total_phosphorous(uci,hbn,t_code)
1103
908
  if reach_ids is None:
@@ -1131,21 +936,21 @@ def subwatershed_total_phosphorous_loading(uci,hbn,reach_ids = None,t_code=5, as
1131
936
  total.index = pd.to_datetime(total.index)
1132
937
  return total
1133
938
 
1134
- def total_phosphorous(uci,hbn,t_code):
939
+ def total_phosphorous(uci,hbn,t_code,operation = 'PERLND'):
1135
940
  #assert(isinstance(perlnd_ids (int,list,None)))
1136
- perlnds = uci.network.subwatersheds()
1137
- perlnds = perlnds.loc[perlnds['SVOL'] == 'PERLND'].drop_duplicates(subset = ['SVOLNO','MLNO'])
941
+ opnids = uci.network.subwatersheds()
942
+ opnids = opnids.loc[opnids['SVOL'] == operation].drop_duplicates(subset = ['SVOLNO','MLNO'])
1138
943
 
1139
944
  totals = []
1140
- for mlno in perlnds['MLNO'].unique():
1141
- perlnd_ids = perlnds['SVOLNO'].loc[perlnds['MLNO'] == mlno].to_list()
1142
- total = dissolved_orthophosphate(uci,hbn,mlno,t_code) + particulate_orthophosphate(uci,hbn,mlno, t_code) + organic_refactory_phosphorous(uci,hbn,mlno,t_code) + labile_oxygen_demand(uci,hbn,mlno,t_code)*0.007326 # Conversation factor to P
1143
- totals.append(total[perlnd_ids])
945
+ for mlno in opnids['MLNO'].unique():
946
+ total = dissolved_orthophosphate(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate(uci,hbn,operation,mlno, t_code) + organic_refactory_phosphorous(uci,hbn,operation,mlno,t_code) + labile_oxygen_demand(uci,hbn,operation,mlno,t_code)*0.007326 # Conversion factor to P
947
+ totals.append(total[opnids['SVOLNO'].loc[opnids['MLNO'] == mlno].to_list()])
1144
948
 
1145
949
  total = pd.concat(totals,axis=1)
1146
950
  total = total.T.groupby(total.columns).sum().T
1147
951
  return total
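As implemented above, the per-segment total-phosphorus roll-up is (the 0.007326 factor is the phosphorus fraction applied to labile BOD in this code):

TP = dissolved PO4 + particulate PO4 (sand + silt + clay) + refractory organic P + 0.007326 x labile BOD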
1148
952
 
953
+
1149
954
  MASSLINK_SCHEME = {'dissolved_orthophosphate': {'tmemn': 'NUIF1',
1150
955
  'tmemsb1': '4',
1151
956
  'tmemsb2':''},
@@ -1169,12 +974,13 @@ MASSLINK_SCHEME = {'dissolved_orthophosphate': {'tmemn': 'NUIF1',
1169
974
  'tmemsb2':''}}
1170
975
 
1171
976
 
1172
- def qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):
977
+ def qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):
1173
978
  masslink = uci.table('MASS-LINK',f'MASS-LINK{mlno}')
1174
979
  masslink = masslink.loc[(masslink['TMEMN'] == tmemn) & (masslink['TMEMSB1'] == tmemsb1) & (masslink['TMEMSB2'] == tmemsb2)]
980
+ masslink.fillna({'MFACTOR': 1}, inplace=True)
1175
981
  ts = 0
1176
982
  for index,row in masslink.iterrows():
1177
- hbn_name = uci.table('PERLND','QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
983
+ hbn_name = uci.table(operation,'QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
1178
984
  hbn_name = row['SMEMN'] + hbn_name
1179
985
  mfactor = row['MFACTOR']
1180
986
  ts = hbn.get_multiple_timeseries(row['SVOL'],t_code,hbn_name)*mfactor + ts
@@ -1183,49 +989,48 @@ def qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):
1183
989
 
1184
990
 
1185
991
 
1186
- def dissolved_orthophosphate(uci,hbn,mlno,t_code = 4):
992
+ def dissolved_orthophosphate(uci,hbn,operation,mlno,t_code = 4):
1187
993
  tmemn = MASSLINK_SCHEME['dissolved_orthophosphate']['tmemn']
1188
994
  tmemsb1 = MASSLINK_SCHEME['dissolved_orthophosphate']['tmemsb1']
1189
995
  tmemsb2 = MASSLINK_SCHEME['dissolved_orthophosphate']['tmemsb2']
1190
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1191
-
1192
- def particulate_orthophosphate(uci,hbn,mlno,t_code = 4):
1193
- ts = particulate_orthophosphate_sand(uci,hbn,mlno,t_code) + particulate_orthophosphate_silt(uci,hbn,mlno,t_code) + particulate_orthophosphate_clay(uci,hbn,mlno,t_code)
1194
- return ts
996
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1195
997
 
1196
- def particulate_orthophosphate_sand(uci,hbn, mlno,t_code = 4):
998
+ def particulate_orthophosphate_sand(uci,hbn,operation,mlno,t_code = 4):
1197
999
  tmemn = MASSLINK_SCHEME['particulate_orthophosphate_sand']['tmemn']
1198
1000
  tmemsb1 = MASSLINK_SCHEME['particulate_orthophosphate_sand']['tmemsb1']
1199
1001
  tmemsb2 = MASSLINK_SCHEME['particulate_orthophosphate_sand']['tmemsb2']
1200
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1002
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1201
1003
 
1202
- def particulate_orthophosphate_silt(uci,hbn, mlno,t_code = 4):
1004
+ def particulate_orthophosphate_silt(uci,hbn,operation, mlno,t_code = 4):
1203
1005
  tmemn = MASSLINK_SCHEME['particulate_orthophosphate_silt']['tmemn']
1204
1006
  tmemsb1 = MASSLINK_SCHEME['particulate_orthophosphate_silt']['tmemsb1']
1205
1007
  tmemsb2 = MASSLINK_SCHEME['particulate_orthophosphate_silt']['tmemsb2']
1206
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1008
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1207
1009
 
1208
- def particulate_orthophosphate_clay(uci,hbn, mlno,t_code = 4):
1010
+ def particulate_orthophosphate_clay(uci,hbn, operation,mlno,t_code = 4):
1209
1011
  tmemn = MASSLINK_SCHEME['particulate_orthophosphate_clay']['tmemn']
1210
1012
  tmemsb1 = MASSLINK_SCHEME['particulate_orthophosphate_clay']['tmemsb1']
1211
1013
  tmemsb2 = MASSLINK_SCHEME['particulate_orthophosphate_clay']['tmemsb2']
1212
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1014
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1213
1015
 
1214
- def organic_refactory_phosphorous(uci,hbn, mlno,t_code = 4):
1016
+ def organic_refactory_phosphorous(uci,hbn, operation,mlno,t_code = 4):
1215
1017
  tmemn = MASSLINK_SCHEME['organic_refactory_phosphorous']['tmemn']
1216
1018
  tmemsb1 = MASSLINK_SCHEME['organic_refactory_phosphorous']['tmemsb1']
1217
1019
  tmemsb2 = MASSLINK_SCHEME['organic_refactory_phosphorous']['tmemsb2']
1218
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1020
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1219
1021
 
1220
- def organic_refactory_carbon(uci,hbn, mlno,t_code = 4):
1022
+ def organic_refactory_carbon(uci,hbn, operation,mlno,t_code = 4):
1221
1023
  tmemn = MASSLINK_SCHEME['organic_refactory_carbon']['tmemn']
1222
1024
  tmemsb1 = MASSLINK_SCHEME['organic_refactory_carbon']['tmemsb1']
1223
1025
  tmemsb2 = MASSLINK_SCHEME['organic_refactory_carbon']['tmemsb2']
1224
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1026
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1225
1027
 
1226
- def labile_oxygen_demand(uci,hbn,mlno,t_code = 4):
1028
+ def labile_oxygen_demand(uci,hbn,operation,mlno,t_code = 4):
1227
1029
  tmemn = MASSLINK_SCHEME['labile_oxygen_demand']['tmemn']
1228
1030
  tmemsb1 = MASSLINK_SCHEME['labile_oxygen_demand']['tmemsb1']
1229
1031
  tmemsb2 = MASSLINK_SCHEME['labile_oxygen_demand']['tmemsb2']
1230
- return qualprop_transform(uci,hbn,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1032
+ return qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2,t_code)
1231
1033
 
1034
+ def particulate_orthophosphate(uci,hbn,operation,mlno,t_code = 4):
1035
+ ts = particulate_orthophosphate_sand(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate_silt(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate_clay(uci,hbn,operation,mlno,t_code)
1036
+ return ts