hspf 2.0.1__tar.gz → 2.0.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. hspf-2.0.3/.gitignore +3 -0
  2. {hspf-2.0.1 → hspf-2.0.3}/PKG-INFO +1 -1
  3. {hspf-2.0.1 → hspf-2.0.3}/pyproject.toml +1 -1
  4. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/hbn.py +5 -4
  5. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/parser/graph.py +70 -261
  6. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/parser/parsers.py +4 -1
  7. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/uci.py +32 -5
  8. hspf-2.0.3/tests/__init__.py +0 -0
  9. hspf-2.0.3/tests/data/Clearwater.tpl +19727 -0
  10. hspf-2.0.3/tests/data/Clearwater.uci +16857 -0
  11. hspf-2.0.3/tests/test_graph.py +0 -0
  12. hspf-2.0.3/tests/test_uci.py +11 -0
  13. hspf-2.0.1/.gitignore +0 -1
  14. {hspf-2.0.1 → hspf-2.0.3}/.gitattributes +0 -0
  15. {hspf-2.0.1 → hspf-2.0.3}/MANIFEST.in +0 -0
  16. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/__init__.py +0 -0
  17. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/ParseTable.csv +0 -0
  18. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/IMPLND/IQUAL.txt +0 -0
  19. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/IMPLND/IWATER.txt +0 -0
  20. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/IMPLND/IWTGAS.txt +0 -0
  21. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/IMPLND/SOLIDS.txt +0 -0
  22. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/PERLND/MSTLAY.txt +0 -0
  23. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/PERLND/PQUAL.txt +0 -0
  24. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/PERLND/PSTEMP.txt +0 -0
  25. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/PERLND/PWATER.txt +0 -0
  26. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/PERLND/PWATGAS.txt +0 -0
  27. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/PERLND/SEDMNT.txt +0 -0
  28. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/PERLND/SNOW.txt +0 -0
  29. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/RCHRES/CONS.txt +0 -0
  30. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/RCHRES/GQUAL.txt +0 -0
  31. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/RCHRES/HTRCH.txt +0 -0
  32. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/RCHRES/HYDR.txt +0 -0
  33. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/RCHRES/NUTRX.txt +0 -0
  34. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/RCHRES/OXRX.txt +0 -0
  35. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/RCHRES/PLANK.txt +0 -0
  36. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/data/Timeseries Catalog/RCHRES/SEDTRN.txt +0 -0
  37. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/helpers.py +0 -0
  38. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/hspfModel.py +0 -0
  39. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/parser/__init__.py +0 -0
  40. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/reports.py +0 -0
  41. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/wdm.py +0 -0
  42. {hspf-2.0.1 → hspf-2.0.3}/src/hspf/wdmReader.py +0 -0
hspf-2.0.3/.gitignore ADDED
@@ -0,0 +1,3 @@
1
+ *.pyc
2
+ *.whl
3
+ *.gz
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hspf
3
- Version: 2.0.1
3
+ Version: 2.0.3
4
4
  Summary: Python package for downloading and running HSPF models
5
5
  Project-URL: Homepage, https://github.com/mfratkin1/pyHSPF
6
6
  Author-email: Mulu Fratkin <michael.fratkin@state.mn.us>
@@ -5,7 +5,7 @@ build-backend = "hatchling.build"
5
5
  [project]
6
6
  name = "hspf"
7
7
  urls = { "Homepage" = "https://github.com/mfratkin1/pyHSPF" } # ? Add this!
8
- version = "2.0.1"
8
+ version = "2.0.3"
9
9
  dependencies = [
10
10
  "pandas",
11
11
  "requests",
@@ -12,9 +12,8 @@ import math
12
12
  from struct import unpack
13
13
  from numpy import fromfile
14
14
  from pandas import DataFrame
15
- from datetime import datetime, timedelta, timezone
15
+ from datetime import datetime, timedelta #, timezone
16
16
  from collections import defaultdict
17
- import numpy as np
18
17
  #from pathlib import Path
19
18
 
20
19
 
@@ -350,11 +349,13 @@ class hbnClass:
350
349
  rows = []
351
350
  times = []
352
351
  nvals = len(self.mapn[operation, id, activity]) # number constituent timeseries
353
- utc_offset = timezone(timedelta(hours=-6)) #UTC is 6hours ahead of CST
352
+ #utc_offset = timezone(timedelta(hours=-6)) #UTC is 6hours ahead of CST
354
353
  for (index, reclen) in self.mapd[operation, id, activity, tcode]:
355
354
  yr, mo, dy, hr, mn = unpack('5I', self.data[index + 36: index + 56])
356
355
  hr = hr-1
357
- dt = datetime(yr, mo, dy, 0, mn ,tzinfo=utc_offset) + timedelta(hours=hr)
356
+ #dt = datetime(yr, mo, dy, 0, mn ,tzinfo=utc_offset) + timedelta(hours=hr)
357
+ dt = datetime(yr, mo, dy, 0, mn ) + timedelta(hours=hr)
358
+
358
359
  times.append(dt)
359
360
 
360
361
  index += 56
@@ -17,43 +17,7 @@ class Node(object):
17
17
 
18
18
  def __str__(self):
19
19
  return self._label
20
-
21
- # class PerlndNode(Node):
22
- # raise NotImplementedError
23
-
24
- # class ReachNode(Node):
25
- # raise NotImplementedError
26
-
27
- # class ImplndNode(Node):
28
- # raise NotImplementedError
29
-
30
- # class SourceNode(Node):
31
- # raise NotImplementedError
32
-
33
- # class TargetNode(Node):
34
- # raise NotImplementedError
35
-
36
- # class MetNode(Node):
37
- # raise NotImplementedError
38
-
39
-
40
-
41
- # class wdmNode(Node):
42
- # raise NotImplementedError
43
-
44
-
45
- # # Add Parameter Nodes Add edges at same time since it's expensive to determine associated plern/implnd/reach node
46
- # keys = [key for key in uci.uci.keys() if key[0] in ['IMPLND','RCHRES','PERLND']]
47
- # for operation,table_name,table_id in keys:
48
- # parms = uci.table(operation,table_name,table_id)
49
- # for opnid, row in parms.iterrows():
50
- # target_node = graph.get_node(G,operation,opnid)
51
- # for parameter in row.index:
52
- # G.add_node(max(G.nodes) + 1, type = 'Parameter', value = row[parameter], name = parameter, operation = operation, table_name = table_name, table_id = table_id)
53
- # #labels[(operation,parameter,table_id)] = [max(G.nodes)]
54
- # G.add_edge(max(G.nodes), target_node)
55
-
56
-
20
+
57
21
 
58
22
  def create_graph(uci):
59
23
 
@@ -101,27 +65,6 @@ def create_graph(uci):
101
65
  tmemsb1 = row['TMEMSB1'],
102
66
  tmemsb2 = row['TMEMSB2'])
103
67
 
104
- # _ = [G.add_edge(row['snode'],row['tnode'],
105
- # mlno = row['MLNO'],
106
- # area = row['AFACTR'],
107
- # tmemsb1 = row['TMEMSB1'],
108
- # tmemsb2 = row['TMEMSB2']) for index, row in schematic.iterrows()]
109
-
110
-
111
-
112
- #Define edges from Ext Sources
113
- # ext_sources['snode'] = ext_sources.index.map(labels)
114
- # ext_sources.set_index(['TVOL','TOPFST'],inplace=True)
115
- # ext_sources['tnode'] = ext_sources.index.map(labels)
116
- # _ = [G.add_edge(row['snode'],row['tnode'],
117
- # smemn = row['SMEMN'],
118
- # smemsb = row['SMEMSB'],
119
- # mfactor = row['MFACTOR'],
120
- # tran = row['TRAN'],
121
- # tmemn = row['TMEMN'],
122
- # tmemsb1 = row['TMEMSB1'],
123
- # tmemsb2 = row['TMEMSB2']) for index, row in ext_sources.iterrows()]
124
-
125
68
 
126
69
 
127
70
  # Add property information
@@ -139,200 +82,10 @@ def create_graph(uci):
139
82
  G.nodes[labels[('RCHRES',index)]]['name'] = row['RCHID']
140
83
  G.nodes[labels[('RCHRES',index)]]['lkfg'] = row['LKFG']
141
84
 
142
-
143
- # # Add property information
144
- # bininfo = uci.table('PERLND','BINARY-INFO')
145
- # for index,row in geninfo.iterrows():
146
- # G.nodes[labels[('PERLND',index)]]['name'] = row['LSID']
147
-
148
- # bininfo = uci.table('IMPLND','BINARY-INFO')
149
- # for index,row in geninfo.iterrows():
150
- # G.nodes[labels[('IMPLND',index)]]['name'] = row['LSID']
151
-
152
- # bininfo = uci.table('RCHRES','BINARY-INFO')
153
- # for index,row in geninfo.iterrows():
154
- # G.nodes[labels[('RCHRES',index)]]['name'] = row['RCHID']
155
- # G.nodes[labels[('RCHRES',index)]]['lkfg'] = row['LKFG']
156
-
157
-
158
- # labels = {}
159
- # for n, d in G.nodes(data=True):
160
- # l = (d['operation'],d['opnid'])
161
- # labels[l] = labels.get(l, [])
162
- # labels[l].append(n)
163
-
164
-
165
-
166
85
  G.labels = labels
167
86
  return G
168
87
 
169
88
 
170
- # def create_subgraph(G,start_node):
171
- # sub_G = nx.MultDiGraph()
172
- # for n in G.successors_iter(start_node):
173
- # sub_G.add_path([start_node,n])
174
- # create_subgraph(G,sub_G,n)
175
-
176
- # Binary info
177
-
178
- """
179
- CREATE TABLE Operation (
180
- opn VARCHAR,
181
- opnid INTEGER,
182
- PRIMARY KEY (opn, opnid)
183
-
184
-
185
-
186
- )
187
-
188
-
189
-
190
-
191
- """
192
-
193
-
194
-
195
-
196
- """
197
- CREATE TABLE Files (
198
- ftype VARCHAR,
199
- unit INTEGER NOT NULL PRIMARY KEY,,
200
- filename VARCHAR
201
- );
202
-
203
- """
204
-
205
-
206
- """
207
- CREATE TABLE GenInfo (
208
- pk INTEGER NOT NULL,
209
- opn VARCHAR,
210
- opnid INTEGER,
211
- PRIMARY KEY (opn, opnid)
212
- iunits INTEGER,
213
- ounits INTEGER,
214
- punit1 INTEGER,
215
- punit2 INTEGER,
216
- BUNIT1 INTEGER,
217
- BUNIT2 INTEGER
218
- );
219
-
220
- """
221
-
222
- # # Files
223
- # files = uci.table('FILES')
224
- # files['FTYPE'] = files['FTYPE'].replace({'WDM': 'WDM1'})
225
- # dfs = []
226
-
227
- # # PerlndInfo
228
- # operation = 'PERLND'
229
- # geninfo = uci.table(operation,'GEN-INFO')
230
- # binaryinfo = uci.table(operation,'BINARY-INFO')
231
- # if operation == 'RCHRES':
232
- # geninfo = geninfo.rename(columns = {'RCHID':'LSID',
233
- # 'BUNITE':'BUNIT1',
234
- # 'BUNITM': 'BUNIT2',
235
- # 'PUNITE': 'PUNIT1',
236
- # 'PUNITM': 'PUNIT2'})
237
- # df = pd.merge(geninfo,binaryinfo, left_index = True, right_index = True, how = 'outer').reset_index()
238
- # df.insert(0,'OPN',operation)
239
- # df = pd.merge(df,files, left_on = 'BUNIT1', right_on = 'UNIT')
240
-
241
- # # Schematic Table
242
- # schematic = uci.table('SCHEMATIC')
243
-
244
- # # Masslink Table
245
- # masslinks = []
246
- # for table_name in uci.table_names('MASS-LINK'):
247
- # mlno = table_name.split('MASS-LINK')[1]
248
- # masslink = uci.table('MASS-LINK',table_name)
249
- # masslink.insert(0,'MLNO',mlno)
250
- # masslinks.append(masslink)
251
- # masslinks = pd.concat(masslinks)
252
-
253
- # #masslinks['QUALID'] = (masslinks['SMEMSB1'].str.strip().replace('','0').astype(int)-1).replace(-1,pd.NA)
254
-
255
-
256
-
257
- # hbn_name = uci.table('PERLND','QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
258
-
259
- # operation = row['SVOL']
260
- # activity = row['SGRPN']
261
- # ts_name = row['SMEMN']
262
-
263
- # hbn_name = row['SMEMN'] + hbn_name
264
-
265
-
266
- # schematic = uci.table('SCHEMATIC')
267
- # schematic = pd.merge(schematic,masslinks,left_on = 'MLNO',right_on = 'MLNO')
268
-
269
- # all(schematic['SVOL_x'] == schematic['SVOL_y'])
270
- # all(schematic['TVOL_x'] == schematic['TVOL_y'])
271
- # schematic.loc[schematic['TMEMSB1_x'] == '', 'TMEMSB1_y'] = schematic['TMEMSB1_x']
272
- # schematic.loc[schematic['TMEMSB2_x'] == '', 'TMEMSB2_y'] = schematic['TMEMSB2_x']
273
-
274
- # schematic = schematic.drop(columns=['TMEMSB1_y','TMEMSB2_y','TVOL_y','SVOL_y'])
275
- # schematic = schematic.rename(columns = {'SVOL_x':'SVOL',
276
- # 'TVOL_x':'TVOL',
277
- # 'TMEMSB2_x':'TMEMSB2',
278
- # 'TMEMSB1_x':'TMEMSB1'})
279
-
280
-
281
-
282
- # # Watershed Weighted Mean
283
- # subwatersheds = uci.network.subwatersheds()
284
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND'].reset_index()
285
- # df = cal.model.hbns.get_multiple_timeseries('PERLND',5,'PERO',test['SVOLNO'].values).mean().reset_index()
286
- # df.columns = ['OPNID','value']
287
- # weighted_mean = df[['value','AFACTR']].groupby(df['LSID']).apply(lambda x: (x['value'] * x['AFACTR']).sum() / x['AFACTR'].sum())
288
- # weighted_mean.loc['combined'] = (df['value'] * df['AFACTR']).sum() / df['AFACTR'].sum()
289
-
290
-
291
- # # annual weighted timeseries watershed
292
- # reach_ids = [103,119,104,118]
293
- # subwatersheds = uci.network.subwatersheds().loc[reach_ids]
294
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND'].reset_index()
295
- # df = cal.model.hbns.get_multiple_timeseries('PERLND',5,'PERO',test['SVOLNO'].values).mean().reset_index()
296
- # df.columns = ['OPNID','value']
297
- # df = pd.merge(subwatersheds,df,left_on = 'SVOLNO', right_on='OPNID')
298
- # weighted_mean = (df['value'] * df['AFACTR']).sum() / df['AFACTR'].sum()
299
- # df[f'weighted_{ts_name}'] = df.groupby('LSID')[parameter].transform(lambda x: (x * df.loc[x.index, 'AFACTR']).sum() / df.loc[x.index, 'AFACTR'].sum())
300
- # weighted_mean.loc['combined'] = (df['value'] * df['AFACTR']).sum() / df['AFACTR'].sum()
301
-
302
-
303
-
304
- # # parameter average weighted by landcover area
305
- # table_name = 'PWAT-PARM2'
306
- # parameter = 'LZSN'
307
- # table_id = 0
308
- # operation = 'PERLND'
309
-
310
- # subwatersheds = uci.network.subwatersheds()
311
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND'].reset_index()
312
- # df = uci.table(operation,table_name,table_id)[parameter].reset_index()
313
- # df.columns = ['OPNID',parameter]
314
- # df = pd.merge(subwatersheds,df,left_on = 'SVOLNO', right_on='OPNID')
315
- # df[f'weighted_{parameter}'] = df.groupby('TVOLNO')[parameter].transform(lambda x: (x * df.loc[x.index, 'AFACTR']).sum() / df.loc[x.index, 'AFACTR'].sum())
316
-
317
-
318
- # #df[f'weighted_{parameter}'] = df.groupby('LSID')[parameter].transform(lambda x: (x * df.loc[x.index, 'AFACTR']).sum() / df.loc[x.index, 'AFACTR'].sum())
319
-
320
-
321
-
322
-
323
- # extsources = uci.table('EXT SOURCES')
324
- # extsources['SVOL'] = extsources['SVOL'].replace({'WDM': 'WDM1'})
325
-
326
- # df = pd.merge(extsources,df,left_on = 'SVOL',right_on = 'FTYPE',how = 'right')
327
-
328
-
329
- # exttargets = uci.table('EXT TARGETS')
330
- # schematic = uci.table('SCHEMATIC')
331
-
332
-
333
- #%% Methods using universal node id
334
-
335
-
336
89
  def _add_subgraph_labels(G,G_sub):
337
90
  G_sub.labels = {label:node for label, node in G.labels.items() if node in G_sub.nodes}
338
91
  return G_sub
@@ -417,17 +170,19 @@ def nodes(G,node_type,node_type_id,adjacent_node_type):
417
170
  #%% Methods using node_type, node_type_id interface
418
171
 
419
172
  def upstream_network(G,reach_id):
420
- return G.subgraph(nx.ancestors(G,get_node_id(G,'RCHRES',reach_id))).copy()
173
+ node_id = get_node_id(G,'RCHRES',reach_id)
174
+ return G.subgraph([node_id] + list(nx.ancestors(G,node_id))).copy()
421
175
 
422
176
  def downstream_network(G,reach_id):
423
- return G.subgraph(nx.descendants(G,get_node_id(G,'RCHRES',reach_id))).copy()
177
+ node_id = get_node_id(G,'RCHRES',reach_id)
178
+ return G.subgraph([node_id] + list(nx.descendants(G,node_id))).copy()
424
179
 
425
180
  def subset_network(G,reach_id,upstream_reach_ids = None):
426
181
  G = upstream_network(G,reach_id)
427
182
  if upstream_reach_ids is not None:
428
- [G.remove_nodes_from(nx.ancestors(G,upstream_reach_id)) for upstream_reach_id in upstream_reach_ids if upstream_reach_id in G.nodes]
429
- [G.remove_nodes_from([upstream_reach_id]) for upstream_reach_id in upstream_reach_ids if upstream_reach_id in G.nodes]
430
- #assert([len(sinks(G)) == 0,sinks(G)[0] == reach_id])
183
+ for upstream_reach_id in upstream_reach_ids:
184
+ G.remove_nodes_from(get_node_ids(upstream_network(G,upstream_reach_id),'RCHRES'))
185
+ #assert([len(sinks(G)) == 0,sinks(G)[0] == reach_id])
431
186
  return G
432
187
 
433
188
  def upstream_nodes(G,reach_id,upstream_node_type):
@@ -519,7 +274,24 @@ def paths(G,reach_id,source_type = 'RCHRES'):
519
274
  def count_ancestors(G,node_type,ancestor_node_type):
520
275
  return {node['type_id']:len(ancestors(G,node['id'],ancestor_node_type)) for node in get_nodes(G,node_type)}
521
276
 
522
-
277
+ # def catchment_ids(G):
278
+ # result = []
279
+ # for node in get_node_ids(G,'RCHRES'):
280
+ # upstream_nodes = G.predecessors(node)
281
+ # if any([G.nodes[up]['type'] in ['PERLND','IMPLND'] for up in upstream_nodes]):
282
+ # result.append(G.nodes[node]['type_id'])
283
+ # return result
284
+
285
+ # Very expensive. Should probably standardize it so routing reaches have no implnds/perlnds
286
+ def catchment_ids(G):
287
+ result = []
288
+ for node in get_node_ids(G,'RCHRES'):
289
+ upstream_nodes = G.predecessors(node)
290
+ if any([G.nodes[up]['type'] in ['PERLND','IMPLND'] for up in upstream_nodes]):
291
+ cat = make_catchment(G,G.nodes[node]['type_id'])
292
+ if area(cat) > 0:
293
+ result.append(G.nodes[node]['type_id'])
294
+ return result
523
295
 
524
296
  # Catchment constructor
525
297
  def make_catchment(G,reach_id):
@@ -550,8 +322,28 @@ def make_watershed(G,reach_ids):
550
322
  return watershed
551
323
 
552
324
 
553
- def catcments(G):
554
- return None
325
+ # def catcments(G):
326
+ # cats = [Catchment(graph.make_catchment(G,reach_id) for reach_id in graph.get_node_type_ids(G,'RCHRES'))]
327
+
328
+ # return
329
+
330
+ # for u, v, edge_data in graph.make_catchment(G,reach_id).edges(data=True):
331
+ # source_node_attributes = G.nodes[u]
332
+ # # Add or update edge attributes with source node attributes
333
+ # edge_data["source_type"] = source_node_attributes.get("type")
334
+ # edge_data["source_name"] = source_node_attributes.get("name")
335
+ # edge_data["source_type_id"] = source_node_attributes.get("type_id")
336
+ # cats.append(edge_data)
337
+
338
+ # return pd.DataFrame(cats)
339
+
340
+
341
+ # for node in G.nodes:
342
+ # upstream_nodes = G.predecessors(node)
343
+ # if any(G.nodes[up]['type'] in ['PELND','IMPLND'] for up in upstream_nodes):
344
+ # result.append(node)
345
+
346
+ # return None
555
347
  # Catchment selectors
556
348
 
557
349
  '''
@@ -637,9 +429,12 @@ class Catchment():
637
429
  #%% Legacy Methods for Backwards compatability
638
430
  class reachNetwork():
639
431
  def __init__(self,uci,reach_id = None):
432
+ self.uci = uci
640
433
  self.G = create_graph(uci)
434
+ self.catchment_ids = catchment_ids(self.G)
435
+ self.routing_reaches = self._routing_reaches()
436
+ self.lakes = self._lakes()
641
437
  self.schematic = uci.table('SCHEMATIC').astype({'TVOLNO': int, "SVOLNO": int, 'AFACTR':float})
642
- self.uci = uci
643
438
 
644
439
  def get_node_type_ids(self,node_type):
645
440
  return get_node_type_ids(self.G, node_type)
@@ -709,15 +504,18 @@ class reachNetwork():
709
504
 
710
505
  def subwatersheds(self,reach_ids = None):
711
506
  df = subwatersheds(self.uci)
712
- if reach_ids is not None:
713
- df = df.loc[df.index.intersection(reach_ids)]
714
- return df
507
+ if reach_ids is None:
508
+ reach_ids = get_node_type_ids(self.G,'RCHRES')
509
+ return df.loc[df.index.intersection(reach_ids)]
715
510
 
716
511
  def subwatershed(self,reach_id):
717
512
  return subwatershed(self.uci,reach_id) #.loc[reach_id]
718
513
 
719
514
  def subwatershed_area(self,reach_id):
720
- return self.drainage(reach_id).query("source_type in ['PERLND','IMPLND']")['area'].sum()
515
+ area = self.drainage(reach_id).query("source_type in ['PERLND','IMPLND']")['area'].sum()
516
+ # if (reach_id in self.lakes()) & (f'FTABLE{reach_id}' in self.uci.table_names('FTABLES')):
517
+ # area = area + self.lake_area(reach_id)
518
+ return area
721
519
 
722
520
  def reach_contributions(self,operation,opnids):
723
521
  return reach_contributions(self.uci,operation,opnids)
@@ -736,6 +534,15 @@ class reachNetwork():
736
534
  def outlets(self):
737
535
  return [self.G.nodes[node]['type_id'] for node, out_degree in self.G.out_degree() if (out_degree == 0) & (self.G.nodes[node]['type'] == 'RCHRES')]
738
536
 
537
+ def _lakes(self):
538
+ return list(self.uci.table('RCHRES','GEN-INFO').query('LKFG == 1',engine = 'python').index.astype(int))
539
+
540
+ def lake_area(self,reach_id):
541
+ return self.uci.table('FTABLES',f'FTABLE{reach_id}')['Area'].max()
542
+
543
+ def _routing_reaches(self):
544
+ return [reach_id for reach_id in self.get_node_type_ids('RCHRES') if reach_id not in self.catchment_ids]
545
+
739
546
  def paths(self,reach_id):
740
547
  return paths(self.G,reach_id)
741
548
 
@@ -761,6 +568,7 @@ def calibration_order(G,reach_id,upstream_reach_ids = None):
761
568
 
762
569
  def get_opnids(G,operation,reach_id = None, upstream_reach_ids = None):
763
570
  G = subset_network(G,reach_id,upstream_reach_ids)
571
+ return ancestors(G,get_node_id(G,'RCHRES',reach_id),operation)
764
572
  perlnds = [node['type_id'] for node in get_nodes(G,'PERLND')]
765
573
  implnds = [node['type_id'] for node in get_nodes(G,'IMPLND')]
766
574
  reachs = [node['type_id'] for node in get_nodes(G,'RCHRES')]
@@ -799,6 +607,7 @@ def subwatersheds(uci):
799
607
  schematic = schematic[schematic['TVOL'] == 'RCHRES'][['SVOLNO','TVOLNO','AFACTR','MLNO']].astype({'SVOLNO':int,'TVOLNO':int,'AFACTR':float,'MLNO':int})
800
608
  schematic.reset_index(inplace=True,drop=False)
801
609
  schematic.set_index('TVOLNO',inplace=True)
610
+ schematic = schematic.loc[catchment_ids(uci.network.G)]
802
611
 
803
612
  dfs = []
804
613
  for operation in ['PERLND','IMPLND']:
@@ -52,7 +52,10 @@ class Table():
52
52
 
53
53
  self.parser = parserSelector[self.block]
54
54
  #self.updater = Updater
55
-
55
+
56
+ def _delimiters(self):
57
+ return delimiters(self.block,self.name)
58
+
56
59
  def parse(self):
57
60
  self.data = self.parser.parse(self.block,self.name,self.lines)
58
61
 
@@ -78,7 +78,7 @@ class UCI():
78
78
 
79
79
  def table(self,block,table_name = 'na',table_id = 0,drop_comments = True):
80
80
  # Dynamic parsing of tables when called by user
81
- assert block in ['FILES','PERLND','IMPLND','RCHRES','SCHEMATIC','OPN SEQUENCE','MASS-LINK','EXT SOURCES','NETWORK','GENER','MONTH-DATA','EXT TARGETS','COPY']
81
+ assert block in ['FILES','PERLND','IMPLND','RCHRES','SCHEMATIC','OPN SEQUENCE','MASS-LINK','EXT SOURCES','NETWORK','GENER','MONTH-DATA','EXT TARGETS','COPY','FTABLES']
82
82
 
83
83
  table = self.uci[(block,table_name,table_id)] #[block][table_name][table_id]
84
84
  #TODO move the format_opnids into the Table class?
@@ -176,13 +176,40 @@ class UCI():
176
176
  lines += ['']
177
177
  lines += ['END RUN']
178
178
  self.lines = lines
179
+
180
+
181
+ def _write(self,filepath):
182
+ with open(filepath, 'w') as the_file:
183
+ for line in self.lines:
184
+ the_file.write(line+'\n')
185
+
186
+ def add_parameter_template(self,block,table_name,table_id,parameter,tpl_char = '~'):
179
187
 
180
-
188
+ table = self.table(block,table_name,0,False).reset_index()
189
+ column_names,dtypes,starts,stops = self.uci[(block,table_name,table_id)]._delimiters()
190
+
191
+ width = stops[column_names.index(parameter)] - starts[column_names.index(parameter)]
192
+
193
+ ids = ~table[parameter].isna() # Handle comment lines in uci
194
+
195
+ # Replace paramter name with PEST/PEST++ specification. Note this does not use the HSPF supplemental file so parameters are limited to width of uci file column
196
+ pest_param = tpl_char + parameter.lower() + table.loc[ids,'OPNID'].astype(str)
197
+ pest_param = pest_param.apply(lambda name: name + ' '*(width-len(name)-1)+ tpl_char)
198
+
199
+ table.loc[ids,parameter] = pest_param
200
+ table = table.set_index('OPNID')
201
+ self.replace_table(table,block,table_name,table_id)
202
+
203
+ def write_tpl(self,tpl_char = '~',new_tpl_path = None):
204
+ if new_tpl_path is None:
205
+ new_tpl_path = self.filepath.parent.joinpath(self.filepath.stem + '.tpl')
206
+ self.merge_lines()
207
+ self.lines.insert(0,tpl_char)
208
+ self._write(new_tpl_path)
209
+
181
210
  def write(self,new_uci_path):
182
211
  self.merge_lines()
183
- with open(new_uci_path, 'w') as the_file:
184
- for line in self.lines:
185
- the_file.write(line+'\n')
212
+ self._write(new_uci_path)
186
213
 
187
214
  def update_bino(self,name):
188
215
  #TODO: Move up to busniess/presentation layer
File without changes