hspf-2.1.1-py3-none-any.whl → hspf-2.1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hspf/reports.py CHANGED
@@ -25,8 +25,8 @@ class Reports():
25
25
  return scour(self.hbns,self.uci,start_year = start_year,end_year=end_year)
26
26
 
27
27
  # Hydrology Reports
28
- def landcover_area(self):
29
- return landcover_areas(self.uci)
28
+ def landcover_area(self,reach_ids,upstream_reach_ids = None):
29
+ return watershed_landcover_areas(self.uci,reach_ids,upstream_reach_ids)
30
30
 
31
31
  def annual_water_budget(self,operation):
32
32
  assert operation in ['PERLND','RCHRES','IMPLND']
@@ -36,10 +36,37 @@ class Reports():
36
36
  return annual_implnd_water_budget(self.uci,self.hbns)
37
37
  else:
38
38
  return annual_reach_water_budget(self.uci,self.hbns)
39
+
40
+ def annual_precip(self):
41
+ return avg_annual_precip(self.uci,self.wdms)
42
+
43
+ def simulated_et(self):
44
+ return simulated_et(self.uci,self.hbns)
45
+
46
+ def annual_perlnd_runoff(self,reach_ids = None,upstream_reach_ids = None,start_year = 1996,end_year = 2100):
47
+ if (reach_ids is None) and (upstream_reach_ids is None):
48
+ opnids = None
49
+ else:
50
+ opnids = self.uci.network.get_opnids('PERLND',reach_ids,upstream_reach_ids)
51
+ return annual_perlnd_runoff(self.uci,self.hbns,opnids,start_year,end_year)
52
+
53
+ #% Catchment and Watershed Loading (Edge of Field Load) Reports
54
+ #
55
+ def average_annual_catchment_loading(self,constituent,by_landcover = False,start_year = 1996,end_year = 2100):
56
+ return average_annual_catchment_loading(self.uci,self.hbns,constituent,by_landcover = by_landcover,start_year = start_year,end_year = end_year)
57
+
58
+ def average_monthly_catchment_loading(self,constituent,by_landcover = False,start_year = 1996,end_year = 2100):
59
+ return average_monthly_catchment_loading(self.uci,self.hbns,constituent,by_landcover = by_landcover,start_year = start_year,end_year = end_year)
60
+
61
+ def average_annual_watershed_loading(self,constituent,reach_ids,upstream_reach_ids = None, start_year = 1996, end_year = 2100, by_landcover = False,drainage_area = None):
62
+ return average_annual_watershed_loading(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids, start_year, end_year, by_landcover,drainage_area)
63
+
64
+ def average_monthly_watershed_loading(self,constituent,reach_ids,upstream_reach_ids = None, start_year = 1996, end_year = 2100, by_landcover = False,drainage_area = None):
65
+ return average_monthly_watershed_loading(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids, start_year, end_year, by_landcover,drainage_area)
39
66
 
40
67
  def watershed_loading(self,constituent,reach_ids,upstream_reach_ids = None,by_landcover = False):
41
68
  '''
42
- Calculate the loading to channels from a watershed.
69
+ Calculate the edge of field loading to channels from each catchment within the watershed defined by reach_ids and upstream_reach_ids.
43
70
 
44
71
  Parameters
45
72
  ----------
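For orientation, a minimal usage sketch of the report methods added in this hunk (hypothetical setup: it assumes a Reports instance already wired to the model's uci, hbns, and wdms, and uses placeholder reach IDs and an example constituent name 'TP'):

    # illustrative only, not part of the package diff
    reports = Reports(uci, hbns, wdms)                      # hypothetical constructor signature
    areas = reports.landcover_area(reach_ids=[10], upstream_reach_ids=[5])
    precip = reports.annual_precip()
    tp_by_catchment = reports.average_annual_catchment_loading('TP', by_landcover=True,
                                                               start_year=2000, end_year=2020)
    tp_watershed = reports.average_annual_watershed_loading('TP', reach_ids=[10],
                                                            upstream_reach_ids=[5])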
@@ -57,40 +84,40 @@ class Reports():
57
84
  def catchment_loading(self,constituent,by_landcover = False):
58
85
  return get_catchment_loading(self.uci,self.hbns,constituent,by_landcover)
59
86
 
87
+ # def average_annual_subwatershed_loading(self,constituent):
88
+ # return ann_avg_subwatershed_loading(constituent,self.uci, self.hbns)
89
+
90
+ # def average_annual_watershed_loading(self,constituent,reach_ids):
91
+ # landcovers = ann_avg_watershed_loading(constituent,reach_ids,self.uci, self.hbns, True)
92
+ # total = ann_avg_watershed_loading(constituent,reach_ids,self.uci, self.hbns, False)
93
+ # total.index = ['Total']
94
+ # total = pd.concat([landcovers,total])
95
+ # total['volume'] = total['area']*total[f'weighted_mean_{constituent}']
96
+ # total['volume_percent'] = total['volume']/total.loc['Total','volume']*100
97
+ # total['area_percent'] = total['area']/total.loc['Total','area']*100
98
+ # total['share'] = total['volume_percent']/total['area_percent']
99
+ # return total
100
+ # Contributions Reports
60
101
  def contributions(self,constituent,target_reach_id):
61
102
  return total_contributions(constituent,self.uci,self.hbns,target_reach_id)
62
103
 
63
104
  def landcover_contributions(self,constituent,target_reach_id,landcover = None):
64
105
  return catchment_contributions(self.uci,self.hbns,constituent,target_reach_id)
65
106
 
66
- def ann_avg_subwatershed_loading(self,constituent):
67
- return ann_avg_subwatershed_loading(constituent,self.uci, self.hbns)
68
-
69
- def ann_avg_watershed_loading(self,constituent,reach_ids):
70
- landcovers = ann_avg_watershed_loading(constituent,reach_ids,self.uci, self.hbns, True)
71
- total = ann_avg_watershed_loading(constituent,reach_ids,self.uci, self.hbns, False)
72
- total.index = ['Total']
73
- total = pd.concat([landcovers,total])
74
- total['volume'] = total['area']*total[f'weighted_mean_{constituent}']
75
- total['volume_percent'] = total['volume']/total.loc['Total','volume']*100
76
- total['area_percent'] = total['area']/total.loc['Total','area']*100
77
- total['share'] = total['volume_percent']/total['area_percent']
78
- return total
107
+ # Landscape Yield Reports
79
108
 
80
- def ann_avg_yield(self,constituent,reach_ids,upstream_reach_ids = None):
81
- df= avg_ann_yield(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids)
109
+ def average_annual_yield(self,constituent,reach_ids,upstream_reach_ids = None,start_year = 1996,end_year = 2100):
110
+ df= average_annual_yield(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids,start_year,end_year)
111
+ return df
112
+
113
+ def average_monthly_yield(self,constituent,reach_ids,upstream_reach_ids = None,start_year = 1996,end_year = 2100):
114
+ df= average_monthly_yield(self.uci,self.hbns,constituent,reach_ids,upstream_reach_ids,start_year,end_year)
82
115
  return df
83
116
 
84
- def annual_precip(self):
85
- return avg_annual_precip(self.uci,self.wdms)
86
-
87
- def simulated_et(self):
88
- return simulated_et(self.uci,self.hbns)
89
-
90
-
117
+
91
118
 
92
119
  #%% Channel Reports
93
- def scour(hbn,uci,start_year = '1996',end_year = '2030'):
120
+ def scour(hbn,uci,start_year = 1996,end_year = 2030):
94
121
  # Should eventually create an entire reports module or class in order to calculate all of the different model checks
95
122
  # TODO: Incorporate IMPLNDS
96
123
  schematic = uci.table('SCHEMATIC').copy()
@@ -103,7 +130,7 @@ def scour(hbn,uci,start_year = '1996',end_year = '2030'):
103
130
  activity = 'SEDMNT',
104
131
  t_code = 'yearly',
105
132
  opnids = None)
106
- sosed = sosed.loc[(sosed.index > start_year) & (sosed.index < end_year)].mean().rename('mean').to_frame()
133
+ sosed = sosed.loc[(sosed.index.year >= start_year) & (sosed.index.year <= end_year)].mean().rename('mean').to_frame()
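The filter above now compares integer years against the DatetimeIndex's .year attribute instead of comparing strings against the index itself. A self-contained sketch of the same pattern on a toy yearly frame:

    import pandas as pd

    df = pd.DataFrame({'SOSED': [1.0, 2.0, 3.0, 4.0, 5.0]},
                      index=pd.date_range('1995-01-01', periods=5, freq='YS'))
    start_year, end_year = 1996, 1998
    window = df.loc[(df.index.year >= start_year) & (df.index.year <= end_year)]
    mean_load = window.mean().rename('mean').to_frame()   # mean of the 1996-1998 rows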
107
134
 
108
135
  sosld = hbn.get_multiple_timeseries(t_opn = 'IMPLND',
109
136
  t_con = 'SOSLD',
@@ -124,7 +151,9 @@ def scour(hbn,uci,start_year = '1996',end_year = '2030'):
124
151
  scour_report = []
125
152
  # the SCHEMATIC block will have all the possible PERLNDs while sosed only has PERLNDs that were simulated
126
153
  # in other words, the information in sosed is a subset of schematic
127
- for tvolno in lakeflag.index: #schematic['TVOLNO'].unique():
154
+ for tvolno in lakeflag.index.intersection(uci.opnid_dict['RCHRES'].index): #schematic['TVOLNO'].unique():
155
+ implnd_load = 0
156
+ prlnd_load = 0
128
157
  reach_load = depscr.loc[tvolno].values[0]
129
158
  schem_sub = schematic[schematic['TVOLNO'] == tvolno]
130
159
  if len(schem_sub) == 0:
@@ -169,43 +198,91 @@ def get_catchments(uci,reach_ids):
169
198
 
170
199
 
171
200
 
172
- #%% Landscape Yields
201
+ #%% Constituent Loads
173
202
 
174
- def yield_flow(uci,hbn,constituent,reach_id):
175
- hbn.get_rchres_data('Q',reach_id,'cfs','yearly')/uci.network.drainage_area(reach_id)
203
+ def _constituent_load(hbn,constituent,time_step = 5):
204
+ #reach_ids = uci.network.G.nodes
205
+ if constituent == 'Q':
206
+ units = 'acrft'
207
+ else:
208
+ units = 'lb'
176
209
 
210
+ df = hbn.get_rchres_output(constituent,units,time_step)
177
211
 
178
- def yield_sediment(uci,hbn,constituent,reach_id):
179
- hbn.get_rchres_data('TSS',reach_id,'lb','yearly').mean()*2000/uci.network.drainage_area(reach_id)
212
+ return df
180
213
 
181
- def avg_ann_yield(uci,hbn,constituent,reach_ids,upstream_reach_ids = None):
214
+ def constituent_load(hbn,constituent,reach_ids,time_step = 5,upstream_reach_ids = None):
182
215
  #reach_ids = uci.network.G.nodes
183
-
184
- reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
185
- area = uci.network.drainage_area(reach_ids,upstream_reach_ids)
186
-
187
216
  if constituent == 'Q':
188
217
  units = 'acrft'
189
218
  else:
190
219
  units = 'lb'
220
+
221
+ upstream_load = 0
222
+ if upstream_reach_ids is not None:
223
+ upstream_load = constituent_load(hbn,constituent,upstream_reach_ids,time_step)
224
+
225
+ df = hbn.get_reach_constituent(constituent,reach_ids,time_step,unit =units) - upstream_load
226
+
227
+ return df
228
+
229
+ #%% Landscape Yields
230
+
231
+ def _constituent_yield(uci,hbn,constituent,time_step = 5):
232
+ df = _constituent_load(hbn,constituent,time_step)
233
+
234
+ areas = [uci.network.drainage_area([reach_id]) for reach_id in df.columns]
235
+ return df/areas
236
+
237
+ def constituent_yield(uci,hbn,constituent,reach_ids,time_step = 5,upstream_reach_ids = None,drainage_area = None):
238
+ #reach_ids = uci.network.G.nodes
239
+
240
+ if drainage_area is None:
241
+ drainage_area = uci.network.drainage_area(reach_ids,upstream_reach_ids)
242
+
243
+ df = constituent_load(hbn,constituent,reach_ids,time_step,upstream_reach_ids)/drainage_area
191
244
 
192
- df = hbn.get_reach_constituent(constituent,reach_ids,5,unit =units).mean() # Gross
245
+ return df
246
+
247
+ def average_annual_yield(uci,hbn,constituent,reach_ids,upstream_reach_ids = None,start_year = 1996,end_year = 2100,drainage_area = None):
248
+ df = constituent_yield(uci,hbn,constituent,reach_ids,5,upstream_reach_ids,drainage_area)
249
+ df = df.loc[(df.index.year >= start_year) & (df.index.year <= end_year)].mean()
250
+ return df
251
+
252
+ def average_monthly_yield(uci,hbn,constituent,reach_ids,upstream_reach_ids = None,start_year = 1996,end_year = 2100,drainage_area = None):
253
+ df = constituent_yield(uci,hbn,constituent,reach_ids,4,upstream_reach_ids,drainage_area)
254
+ df = df.loc[(df.index.year >= start_year) & (df.index.year <= end_year)]
255
+ df = df.groupby(df.index.month).mean()
256
+ return df
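As composed above, a yield is the reach load divided by the contributing drainage area, and average_annual_yield / average_monthly_yield simply average that series over the requested year window. A hedged usage sketch (uci, hbn, and the reach IDs are placeholders for an already-loaded model; 'TSS' is an example constituent):

    # illustrative only
    tss_yield = average_annual_yield(uci, hbn, 'TSS',
                                     reach_ids=[10], upstream_reach_ids=[5],
                                     start_year=2000, end_year=2020)
    # equivalent decomposition:
    area = uci.network.drainage_area([10], [5])
    load = constituent_load(hbn, 'TSS', [10], time_step=5, upstream_reach_ids=[5])
    tss_yield_check = load / area
    tss_yield_check = tss_yield_check.loc[(tss_yield_check.index.year >= 2000) &
                                          (tss_yield_check.index.year <= 2020)].mean()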
193
257
 
194
- return df/area
195
258
 
196
- #%% Catchment and Watershed Loading
259
+
260
+ #%% Catchment and Watershed Loading (Edge of Field Load)
197
261
 
198
- def landcover_areas(uci):
199
- df = uci.network.operation_area('PERLND').groupby('LSID').sum()
200
- df['percent'] = 100*(df['AFACTR']/df['AFACTR'].sum())
262
+ def watershed_landcover_areas(uci,reach_ids,upstream_reach_ids = None):
263
+ df = uci.network.drainage_area_landcover(reach_ids,upstream_reach_ids,group=True).reset_index()
264
+ df['percent'] = 100*(df['area']/df['area'].sum())
265
+ return df
266
+
267
+ def catchment_landcover_areas(uci,reach_ids = None):
268
+ df = uci.network.subwatersheds().reset_index()[['TVOLNO','SVOL','LSID','AFACTR']]
269
+ df.rename(columns = {'AFACTR':'area',
270
+ 'TVOLNO':'catchment_id',
271
+ 'LSID':'landcover',
272
+ 'SVOL':'landcover_type'},inplace = True)
273
+ if reach_ids is not None:
274
+ df = df.loc[df['catchment_id'].isin(reach_ids)]
201
275
  return df
202
276
 
277
+
278
+
203
279
  def catchment_areas(uci):
204
280
  df = uci.network.subwatersheds().reset_index()
205
281
  df = df.groupby('TVOLNO')['AFACTR'].sum().reset_index()
206
282
  df.rename(columns = {'AFACTR':'catchment_area'},inplace = True)
207
283
  return df
208
284
 
285
+
209
286
  def get_constituent_loading(uci,hbn,constituent,time_step = 5):
210
287
 
211
288
 
@@ -235,42 +312,170 @@ def get_constituent_loading(uci,hbn,constituent,time_step = 5):
235
312
  # df['time_step'] = time_step
236
313
  # df['year'] = pd.DatetimeIndex(df['datetime']).year
237
314
  # df['month'] = pd.DatetimeIndex(df['datetime']).month
315
+ return df
316
+
317
+
318
+ # subwatersheds = uci.network.subwatersheds().reset_index()
319
+ # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
320
+ # areas = catchment_areas(uci)
321
+
322
+ # df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='right')
323
+ # df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
324
+ # df['load'] = df['value']*df['AFACTR']
325
+ # df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
326
+ # df['constituent'] = constituent
327
+ # return df[['index','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
328
+ def _join_catchments(df,uci,constituent):
329
+ subwatersheds = uci.network.subwatersheds().reset_index()
330
+ subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
331
+ areas = catchment_areas(uci)
238
332
 
333
+ df = pd.merge(
334
+ subwatersheds, df,
335
+ left_on=['SVOL', 'SVOLNO'],
336
+ right_on=['OPERATION', 'OPNID'],
337
+ how='inner'
338
+ )
339
+ df = pd.merge(df, areas, on='TVOLNO', how='left')
340
+
341
+ df['load'] = df['value'] * df['AFACTR']
342
+ df = df.rename(columns={
343
+ 'value': 'loading_rate',
344
+ 'AFACTR': 'landcover_area',
345
+ 'LSID': 'landcover'
346
+ })
347
+ df['constituent'] = constituent
348
+ return df
239
349
 
350
+ def get_catchment_loading(uci,hbn,constituent,time_step=5,by_landcover = False):
351
+ df = get_constituent_loading(uci,hbn,constituent,time_step)
352
+
240
353
  subwatersheds = uci.network.subwatersheds().reset_index()
241
354
  subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
242
355
  areas = catchment_areas(uci)
243
356
 
244
- df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='right')
357
+
358
+ df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='inner')
245
359
  df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
246
360
  df['load'] = df['value']*df['AFACTR']
247
361
  df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
248
362
  df['constituent'] = constituent
249
- return df[['index','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
363
+ df = df[['index','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
364
+ return df
365
+
366
+
367
+
368
+ def get_watershed_loading(uci,hbn,reach_ids,constituent,upstream_reach_ids = None,by_landcover = False,time_step = 5):
369
+ '''
370
+ Edge of field loading for all catchments within a watershed defined by reach_ids and upstream_reach_ids
371
+
372
+
373
+ '''
374
+ reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
375
+
376
+ df = get_catchment_loading(uci,hbn,constituent,time_step)
377
+ df = df.loc[df['TVOLNO'].isin(reach_ids)]
378
+ return df
379
+
250
380
 
381
+ def average_annual_constituent_loading(uci,hbn,constituent,start_year = 1996,end_year = 2100):
382
+ df = get_constituent_loading(uci,hbn,constituent,time_step=5)
383
+ df = df.loc[(df['index'].dt.year >= start_year) & (df['index'].dt.year <= end_year)]
384
+ df['year'] = df['index'].dt.year
385
+ df = df.groupby(['OPERATION','OPNID'])['value'].mean().reset_index()
386
+ return df
387
+
388
+ def average_monthly_constituent_loading(uci,hbn,constituent,start_year = 1996,end_year = 2100):
389
+ df = get_constituent_loading(uci,hbn,constituent,time_step=4)
390
+ df = df.loc[(df['index'].dt.year >= start_year) & (df['index'].dt.year <= end_year)]
391
+ df['month'] = df['index'].dt.month
392
+ df = df.groupby(['month','OPERATION','OPNID'])['value'].mean().reset_index()
393
+ return df
251
394
 
252
- def get_catchment_loading(uci,hbn,constituent,by_landcover = False):
253
- df = get_constituent_loading(uci,hbn,constituent)
254
- if not by_landcover:
255
- df = df.groupby(['TVOLNO','constituent'])[['landcover_area','load']].sum().reset_index()
395
+ def average_annual_catchment_loading(uci,hbn,constituent,start_year = 1996,end_year = 2100,by_landcover = False):
396
+ df = average_annual_constituent_loading(uci,hbn,constituent,start_year,end_year)
397
+
398
+ subwatersheds = uci.network.subwatersheds().reset_index()
399
+ subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
400
+ areas = catchment_areas(uci)
401
+
402
+
403
+ df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='inner')
404
+ df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
405
+ df['load'] = df['value']*df['AFACTR']
406
+ df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
407
+ df['constituent'] = constituent
408
+ df = df[['constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
409
+
410
+
411
+ if by_landcover:
412
+ df = df.groupby(['TVOLNO','landcover','constituent'])[['landcover_area','load']].sum().reset_index()
256
413
  df['loading_rate'] = df['load']/df['landcover_area']
414
+ else:
415
+ df = df.groupby(['TVOLNO','constituent','catchment_area'])[['load']].sum().reset_index()
416
+ df['loading_rate'] = df['load']/df['catchment_area']
257
417
  return df
258
418
 
259
- def get_watershed_loading(uci,hbn,reach_ids,constituent,upstream_reach_ids = None,by_landcover = False):
260
- reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
419
+ def average_monthly_catchment_loading(uci,hbn,constituent,start_year = 1996,end_year = 2100,by_landcover = False):
420
+ df = average_monthly_constituent_loading(uci,hbn,constituent,start_year,end_year)
421
+
422
+ subwatersheds = uci.network.subwatersheds().reset_index()
423
+ subwatersheds = subwatersheds.loc[subwatersheds['SVOL'].isin(['PERLND','IMPLND'])]
424
+ areas = catchment_areas(uci)
425
+
261
426
 
262
- df = get_constituent_loading(uci,hbn,constituent)
427
+ df = pd.merge(subwatersheds,df,left_on = ['SVOL','SVOLNO'], right_on=['OPERATION','OPNID'],how='inner')
428
+ df = pd.merge(df,areas,left_on = ['TVOLNO'], right_on='TVOLNO',how='left')
429
+ df['load'] = df['value']*df['AFACTR']
430
+ df = df.rename(columns = {'value':'loading_rate', 'AFACTR':'landcover_area','LSID':'landcover'})
431
+ df['constituent'] = constituent
432
+ df = df[['month','constituent','TVOLNO','SVOLNO','SVOL','landcover','landcover_area','catchment_area','loading_rate','load']]
433
+
434
+
435
+ if by_landcover:
436
+ df = df.groupby(['month','TVOLNO','landcover','constituent'])[['landcover_area','load']].sum().reset_index()
437
+ df['loading_rate'] = df['load']/df['landcover_area']
438
+ else:
439
+ df = df.groupby(['month','TVOLNO','constituent','catchment_area'])[['load']].sum().reset_index()
440
+ df['loading_rate'] = df['load']/df['catchment_area']
441
+ return df
442
+
443
+
444
+
445
+ def average_annual_watershed_loading(uci,hbn,constituent,reach_ids, upstream_reach_ids = None, start_year = 1996, end_year = 2100, by_landcover = False,drainage_area = None):
446
+ df = average_annual_catchment_loading(uci,hbn,constituent,by_landcover = by_landcover,start_year = start_year,end_year = end_year)
447
+ reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
263
448
  df = df.loc[df['TVOLNO'].isin(reach_ids)]
449
+ if drainage_area is None:
450
+ df['watershed_area'] = uci.network.drainage_area(reach_ids,upstream_reach_ids) #df.drop_duplicates(subset=['TVOLNO'])['catchment_area'].sum()
451
+ else:
452
+ df['watershed_area'] = drainage_area
264
453
 
265
454
  if by_landcover:
266
455
  df = df.groupby(['landcover','constituent'])[['landcover_area','load']].sum().reset_index()
267
456
  df['loading_rate'] = df['load']/df['landcover_area']
268
457
  else:
269
- df = df.groupby(['constituent'])[['landcover_area','load']].sum().reset_index()
270
- df['loading_rate'] = df['load']/df['landcover_area']
458
+ df = df.groupby(['constituent','watershed_area'])[['load']].sum().reset_index()
459
+ df['loading_rate'] = df['load']/df['watershed_area']
271
460
 
272
461
  return df
273
462
 
463
+ def average_monthly_watershed_loading(uci,hbn,constituent,reach_ids, upstream_reach_ids = None, start_year = 1996, end_year = 2100, by_landcover = False,drainage_area = None):
464
+ df = average_monthly_catchment_loading(uci,hbn,constituent,by_landcover = by_landcover,start_year = start_year,end_year = end_year)
465
+ reach_ids = uci.network.get_opnids('RCHRES',reach_ids,upstream_reach_ids)
466
+ df = df.loc[df['TVOLNO'].isin(reach_ids)]
467
+ if drainage_area is None:
468
+ df['watershed_area'] = uci.network.drainage_area(reach_ids,upstream_reach_ids) #df.drop_duplicates(subset=['TVOLNO'])['catchment_area'].sum()
469
+ else:
470
+ df['watershed_area'] = drainage_area
471
+
472
+ if by_landcover:
473
+ df = df.groupby(['month','TVOLNO','landcover','constituent'])[['landcover_area','load']].sum().reset_index()
474
+ df['loading_rate'] = df['load']/df['landcover_area']
475
+ else:
476
+ df = df.groupby(['month','constituent','watershed_area'])['load'].sum().reset_index()
477
+ df['loading_rate'] = df['load']/df['watershed_area']
478
+ return df
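All of the catchment and watershed loading reports above follow the same area-weighting pattern: per-operation loading rates are multiplied by each land segment's AFACTR area to get loads, loads are summed to the chosen grouping (landcover, catchment, or watershed), and a rate is recovered by dividing by the matching area. A tiny self-contained pandas sketch of that pattern (toy numbers, not model output):

    import pandas as pd

    df = pd.DataFrame({'TVOLNO': [10, 10], 'landcover': ['forest', 'urban'],
                       'loading_rate': [0.2, 1.5],         # e.g. lb/ac/yr
                       'landcover_area': [800.0, 200.0]})  # ac
    df['load'] = df['loading_rate'] * df['landcover_area']
    catchment = df.groupby('TVOLNO')[['landcover_area', 'load']].sum().reset_index()
    catchment['loading_rate'] = catchment['load'] / catchment['landcover_area']  # 460/1000 = 0.46 lb/ac/yr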
274
479
 
275
480
  #%% Contributions
276
481
  allocation_selector = {'Q': {'input': ['IVOL'],
@@ -425,6 +630,7 @@ def weighted_describe(df, value_col, weight_col):
425
630
  # })
426
631
 
427
632
 
633
+
428
634
  def monthly_avg_constituent_loading(constituent,uci,hbn):
429
635
  dfs = []
430
636
  for t_opn in ['PERLND','IMPLND']:
@@ -727,9 +933,27 @@ def avg_annual_precip(uci,wdm):
727
933
 
728
934
  #%%
729
935
  #%%% Other Reports
936
+ # ts_names = ['PRECIP','PERO','AGWO','IFWO','SURO']
937
+ # operation = 'PERLND'
938
+ # t_code = 4
939
+ # df = pd.concat([hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids) for ts_name in ts_names],axis =0)
940
+ # df = pd.merge(df,subwatershed.reset_index(),left_on = ['OPERATION','OPNID'],right_on = ['SVOL','SVOLNO'],how='inner')
941
+ # df[['datetime','TVOLNO','OPERATION','OPNID','LSID','AFACTR','TIMESERIES','value']]
730
942
 
943
+ # grouping_columns = ['TVOLNO','LSID','TIMESERIES']
944
+ # df.groupby[grouping_columns]
731
945
 
946
+ def weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnids = None):
947
+ df = hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids)
732
948
 
949
+ subwatersheds = uci.network.subwatersheds().reset_index()
950
+ subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation]
951
+
952
+
953
+ df = pd.merge(subwatersheds,df,left_on = 'SVOLNO', right_on='SVOLNO',how='left')
954
+ df = weighted_mean(df,ts_name,'AFACTR')
955
+ df = df.set_index([df.index,'AFACTR'])
956
+ return df
733
957
 
734
958
  def weighted_mean(df,value_col,weight_col):
735
959
  weighted_mean = (df[value_col] * df[weight_col]).sum() / df[weight_col].sum()
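weighted_mean above is a plain AFACTR-weighted average, i.e. sum(value * weight) / sum(weight). A quick self-contained check of the formula:

    import pandas as pd

    df = pd.DataFrame({'PERO': [10.0, 2.0], 'AFACTR': [100.0, 300.0]})
    weighted = (df['PERO'] * df['AFACTR']).sum() / df['AFACTR'].sum()
    assert weighted == 4.0   # (10*100 + 2*300) / 400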
@@ -737,9 +961,11 @@ def weighted_mean(df,value_col,weight_col):
737
961
  'AFACTR' : df[weight_col].sum(),
738
962
  value_col: [weighted_mean]})
739
963
 
740
- def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnids = None,group_by = None):
964
+ def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnids = None,group_by = None,start_year = 1996,end_year = 2100):
741
965
  assert (group_by in [None,'landcover','opnid'])
742
- df = hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids).mean().reset_index()
966
+ df = hbn.get_multiple_timeseries(operation,t_code,ts_name,opnids = opnids)
967
+ df = df.loc[(df.index.year >= start_year) & (df.index.year <= end_year)]
968
+ df = df.mean().reset_index()
743
969
  df.columns = ['SVOLNO',ts_name]
744
970
  subwatersheds = uci.network.subwatersheds().reset_index()
745
971
  subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == operation]
@@ -759,41 +985,41 @@ def annual_weighted_output(uci,hbn,ts_name,operation = 'PERLND',t_code = 5,opnid
759
985
  return df
760
986
 
761
987
 
762
- def monthly_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None, as_rate = False, by_landcover = True, months = [1,2,3,4,5,6,7,8,9,10,11,12]):
763
- df = hbn.get_multiple_timeseries(operation,4,ts_name,opnids = opnids)
764
- df = df.loc[df.index.month.isin(months)]
988
+ # def monthly_weighted_output(uci,hbn,ts_name,operation = 'PERLND',opnids = None, as_rate = False, by_landcover = True, months = [1,2,3,4,5,6,7,8,9,10,11,12]):
989
+ # df = hbn.get_multiple_timeseries(operation,4,ts_name,opnids = opnids)
990
+ # df = df.loc[df.index.month.isin(months)]
765
991
 
766
- areas = uci.network.operation_area(operation)
767
- areas.loc[areas.index.intersection(df.columns)]
768
- df = df[areas.index.intersection(df.columns)]
992
+ # areas = uci.network.operation_area(operation)
993
+ # areas.loc[areas.index.intersection(df.columns)]
994
+ # df = df[areas.index.intersection(df.columns)]
769
995
 
770
- df = (df.groupby(df.index.month).mean()*areas['AFACTR'])
996
+ # df = (df.groupby(df.index.month).mean()*areas['AFACTR'])
771
997
 
772
- if by_landcover:
773
- df = df.T.groupby(areas['LSID']).sum().T
774
- if as_rate:
775
- df = df/areas['AFACTR'].groupby(areas['LSID']).sum().to_list()
776
- else:
777
- if as_rate:
778
- df = df/areas['AFACTR'].sum()
998
+ # if by_landcover:
999
+ # df = df.T.groupby(areas['LSID']).sum().T
1000
+ # if as_rate:
1001
+ # df = df/areas['AFACTR'].groupby(areas['LSID']).sum().to_list()
1002
+ # else:
1003
+ # if as_rate:
1004
+ # df = df/areas['AFACTR'].sum()
779
1005
 
780
- df.columns.name = ts_name
1006
+ # df.columns.name = ts_name
781
1007
 
782
- return df
1008
+ # return df
783
1009
 
784
- def monthly_perlnd_runoff(uci,hbn):
785
- ts_names = ['PRECIP','PERO','AGWO','IFWO','SURO']
786
- df = pd.concat({ts_name:monthly_weighted_output(uci,hbn,ts_name,by_landcover=True,as_rate=True) for ts_name in ts_names},keys =ts_names)
787
- suro_perc = (df.loc['SURO']/df.loc['PERO'])*100
788
- suro_perc = suro_perc.reset_index()
789
- suro_perc['name'] = 'SURO_perc'
790
- suro_perc = suro_perc.set_index(['name','index'])
791
- return pd.concat([df,suro_perc])
1010
+ # def monthly_perlnd_runoff(uci,hbn):
1011
+ # ts_names = ['PRECIP','PERO','AGWO','IFWO','SURO']
1012
+ # df = pd.concat({ts_name:monthly_weighted_output(uci,hbn,ts_name,by_landcover=True,as_rate=True) for ts_name in ts_names},keys =ts_names)
1013
+ # suro_perc = (df.loc['SURO']/df.loc['PERO'])*100
1014
+ # suro_perc = suro_perc.reset_index()
1015
+ # suro_perc['name'] = 'SURO_perc'
1016
+ # suro_perc = suro_perc.set_index(['name','index'])
1017
+ # return pd.concat([df,suro_perc])
792
1018
 
793
1019
 
794
- def annual_perlnd_runoff(uci,hbn):
1020
+ def annual_perlnd_runoff(uci,hbn,opnids = None,start_year=1996,end_year=2100):
795
1021
  ts_names = ['PRECIP','PERO','AGWO','IFWO','SURO']
796
- df = pd.concat([annual_weighted_output(uci,hbn,ts_name,group_by='landcover') for ts_name in ts_names],axis = 1)
1022
+ df = pd.concat([annual_weighted_output(uci,hbn,ts_name,group_by='landcover',opnids=opnids,start_year=start_year,end_year=end_year) for ts_name in ts_names],axis = 1)
797
1023
  df.columns = ts_names
798
1024
  df['suro_perc'] = (df['SURO']/df['PERO'])*100
799
1025
  return df
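annual_perlnd_runoff above stacks the area-weighted annual means of the listed PERLND series and reports surface runoff as a share of total outflow (suro_perc = SURO / PERO * 100). A hedged usage sketch through the new Reports wrapper (same hypothetical reports instance and placeholder reach IDs as in the earlier sketch):

    # illustrative only
    runoff = reports.annual_perlnd_runoff(reach_ids=[10], upstream_reach_ids=[5],
                                          start_year=2000, end_year=2020)
    runoff[['PERO', 'SURO', 'suro_perc']]   # per-landcover means, SURO as % of PERO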
@@ -858,48 +1084,6 @@ def annual_sediment_budget(uci,hbn):
858
1084
  df.columns = ['Sediment','Percentage']
859
1085
  return df
860
1086
 
861
- # def annual_loading_rate():
862
-
863
-
864
- # def annual_yield(uci,hbn,constituent):
865
-
866
-
867
- def subwatershed_weighted_output(uci,hbn,reach_ids,ts_name,time_step,by_landcover=False,as_rate = True):
868
- subwatersheds = uci.network.subwatersheds(reach_ids)
869
- subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND']
870
-
871
- areas = subwatersheds[['SVOLNO','AFACTR']].set_index('SVOLNO')
872
- areas = areas.join( uci.table('PERLND','GEN-INFO')['LSID'])
873
- opnids = subwatersheds['SVOLNO'].to_list()
874
-
875
- df = hbn.get_multiple_timeseries('PERLND',time_step,ts_name,opnids = opnids)
876
-
877
- areas.loc[areas.index.intersection(df.columns)]
878
- df = df[areas.index.intersection(df.columns)]
879
-
880
- if by_landcover:
881
- df = (df*areas['AFACTR'].values).T.groupby(areas['LSID']).sum()
882
- if as_rate:
883
- df = df.T/areas['AFACTR'].groupby(areas['LSID']).sum().to_list()
884
- df.columns.name = ts_name
885
- else:
886
- df = (df * areas['AFACTR'].values).sum(axis=1)
887
- if as_rate:
888
- df = df/areas['AFACTR'].sum()
889
- df.name = ts_name
890
-
891
- return df
892
-
893
-
894
-
895
-
896
-
897
- # def perlnd_water_budget(uci,hbn,time_step = 5):
898
-
899
- # ts_names = ['SUPY','SURO','IFWO','AGWO','PERO','AGWI','IGWI','PET','UZET','LZET','AGWET','BASET','TAET']
900
- # dfs = [area_weighted_output(uci,hbn,ts_name,time_step) for ts_name in ts_names]
901
-
902
-
903
1087
 
904
1088
 
905
1089
  #%% Phosphorus Loading Calculations
@@ -944,10 +1128,15 @@ def total_phosphorous(uci,hbn,t_code,operation = 'PERLND'):
944
1128
  totals = []
945
1129
  for mlno in opnids['MLNO'].unique():
946
1130
  total = dissolved_orthophosphate(uci,hbn,operation,mlno,t_code) + particulate_orthophosphate(uci,hbn,operation,mlno, t_code) + organic_refactory_phosphorous(uci,hbn,operation,mlno,t_code) + labile_oxygen_demand(uci,hbn,operation,mlno,t_code)*0.007326 # Conversion factor to P
947
- totals.append(total[opnids['SVOLNO'].loc[opnids['MLNO'] == mlno].to_list()])
948
-
949
- total = pd.concat(totals,axis=1)
950
- total = total.T.groupby(total.columns).sum().T
1131
+ if isinstance(total, (int, float)): #TODO fix for when no data is present. Don't like this workaround.
1132
+ pass
1133
+ elif not total.empty:
1134
+ valid_opnids = total.columns.intersection(opnids['SVOLNO'].loc[opnids['MLNO'] == mlno])
1135
+ totals.append(total[valid_opnids])
1136
+
1137
+ if len(totals) > 0:
1138
+ total = pd.concat(totals,axis=1)
1139
+ total = total.T.groupby(total.columns).sum().T
951
1140
  return total
952
1141
 
953
1142
 
@@ -974,14 +1163,17 @@ MASSLINK_SCHEME = {'dissolved_orthophosphate': {'tmemn': 'NUIF1',
974
1163
  'tmemsb2':''}}
975
1164
 
976
1165
 
1166
+
977
1167
  def qualprop_transform(uci,hbn,operation,mlno,tmemn,tmemsb1,tmemsb2 = '',t_code = 4):
978
1168
  masslink = uci.table('MASS-LINK',f'MASS-LINK{mlno}')
979
1169
  masslink = masslink.loc[(masslink['TMEMN'] == tmemn) & (masslink['TMEMSB1'] == tmemsb1) & (masslink['TMEMSB2'] == tmemsb2)]
980
1170
  masslink.fillna({'MFACTOR': 1}, inplace=True)
981
1171
  ts = 0
982
1172
  for index,row in masslink.iterrows():
983
- hbn_name = uci.table(operation,'QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
984
- hbn_name = row['SMEMN'] + hbn_name
1173
+ hbn_name = row['SMEMN']
1174
+ if hbn_name in ['IOQUAL','SOQUAL','POQUAL','AOQUAL']:
1175
+ qual_name = uci.table(operation,'QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
1176
+ hbn_name = row['SMEMN'] + qual_name
985
1177
  mfactor = row['MFACTOR']
986
1178
  ts = hbn.get_multiple_timeseries(row['SVOL'],t_code,hbn_name)*mfactor + ts
987
1179
  return ts
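With the change above, the HBN series name is taken from the MASS-LINK source member directly, and the QUALID from the matching QUAL-PROPS table is appended only for the generic quality members (IOQUAL, SOQUAL, POQUAL, AOQUAL). A hedged sketch of that lookup in isolation (QUAL-PROPS access is mocked with a dict; the QUALID value is made up):

    # illustrative only; the real code reads QUALID from uci.table(operation, 'QUAL-PROPS', ...)
    qual_props = {('PERLND', 0): 'ORTHO P'}   # hypothetical QUALID

    def resolve_hbn_name(smemn, smemsb1, operation='PERLND'):
        if smemn in ['IOQUAL', 'SOQUAL', 'POQUAL', 'AOQUAL']:
            return smemn + qual_props[(operation, int(smemsb1) - 1)]
        return smemn

    resolve_hbn_name('POQUAL', '1')   # -> 'POQUALORTHO P'
    resolve_hbn_name('NUIF1', '1')    # -> 'NUIF1'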