hspf 2.0.2__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hspf/bin/WinHSPFLt/WinHspfLt.exe ADDED (binary file, contents not shown)
hspf/hbn.py CHANGED
@@ -176,6 +176,12 @@ class hbnInterface:
  def get_multiple_timeseries(self,t_opn,t_code,t_con,opnids = None,activity = None,axis = 1):
  return pd.concat([hbn.get_multiple_timeseries(t_opn,t_code,t_con,opnids,activity) for hbn in self.hbns],axis = 1)

+ def get_perlnd_constituent(self,constituent,perlnd_ids = None,time_step = 5):
+ return get_simulated_perlnd_constituent(self,constituent,time_step)
+
+ def get_implnd_constituent(self,constituent,implnd_ids = None,time_step = 5):
+ return get_simulated_implnd_constituent(self,constituent,time_step)
+
  def get_reach_constituent(self,constituent,reach_ids,time_step,unit = None):
  if constituent == 'Q':
  df = get_simulated_flow(self,time_step,reach_ids,unit = unit)
@@ -209,48 +215,17 @@ class hbnInterface:

  return df

-
+
  def get_rchres_data(self,constituent,reach_ids,units = 'mg/l',t_code = 'daily'):
  '''
  Convenience function for accessing the hbn time series associated with our current
  calibration method. Assumes you are summing across all dataframes.
-
- Parameters
- ----------
- hbn : TYPE
- DESCRIPTION.
- nutrient_id : TYPE
- DESCRIPTION.
- reach_ids : TYPE
- DESCRIPTION.
- flux : TYPE, optional
- DESCRIPTION. The default is None.
-
- Returns
- -------
- df : TYPE
- DESCRIPTION.
-
- '''
-
-
+ '''

- t_cons = helpers.get_tcons(constituent,'RCHRES',units)
-
-
-
- df = pd.concat([self.get_multiple_timeseries(t_opn = 'RCHRES',
- t_code =t_code,
- t_con = t_con,
- opnids = reach_ids)
- for t_con in t_cons],axis = 1).sum(1).to_frame()
-
- if (constituent == 'Q') & (units == 'cfs'):
- df = df/CF2CFS[t_code]*43560 #Acre-feet/interval to cubic feet/s
-
+ df = pd.concat([self.get_reach_constituent(constituent,[reach_id],t_code,units) for reach_id in reach_ids], axis = 1)
+ df.columns = reach_ids
  df.attrs['unit'] = units
  df.attrs['constituent'] = constituent
- df.attrs['reach_ids'] = reach_ids
  return df
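
A minimal sketch of the reworked interface, assuming an hbnInterface built from a list of .hbn paths; the file names, constituent codes, and reach ids below are illustrative, not taken from the package:

    from hspf.hbn import hbnInterface

    hbns = hbnInterface(['basin_perlnd.hbn', 'basin_rchres.hbn'])  # hypothetical files

    # New in 2.1.0: per-operation convenience wrappers
    pero = hbns.get_perlnd_constituent('PERO')   # PERLND outflow, time_step = 5 (daily)
    suro = hbns.get_implnd_constituent('SURO')   # IMPLND surface runoff

    # get_rchres_data now makes one get_reach_constituent call per reach,
    # so the result has one column per requested reach id
    q = hbns.get_rchres_data('Q', [103, 119], units='cfs', t_code='daily')
    q.attrs['unit'], q.attrs['constituent']      # ('cfs', 'Q')
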
hspf/hspfModel.py CHANGED
@@ -30,7 +30,7 @@ class hspfModel():

  # Imposed structures of an hspf model:
  # 1. all model files are located in the same directory as the uci file.
- def __init__(self,uci_file:str):
+ def __init__(self,uci_file:str,run_model:bool = False):
  #wdm_files:list = None,
  #hbn_files:str = None):
  # Inputs
@@ -39,7 +39,7 @@ class hspfModel():
  self.wdm_paths = []
  self.uci_file = Path(uci_file).resolve()
  # Validate and load binary data
- self.validate_uci()
+ self.validate_uci(run_model = run_model)


  self.hbns = hbn.hbnInterface(self.hbn_paths)
@@ -51,8 +51,28 @@ class hspfModel():
  # Compositions
  self.reports = Reports(self.uci,self.hbns,self.wdms)

+
+ def validate_wdms(self):
+ # Ensure wdm files exist and the folders for the other file types exist relative
+ # to the uci path
+
+ for index, row in self.uci.table('FILES',drop_comments = False).iterrows():
+ file_path = self.uci_file.parent.joinpath(Path(row['FILENAME']))
+ if file_path.suffix.lower() == '.wdm':
+ assert file_path.exists(),'File Specified in the UCI does not exist:' + file_path.as_posix()
+ self.wdm_paths.append(file_path)

- def validate_uci(self):
+ def validate_pltgens(self):
+ raise NotImplementedError()
+
+ def validate_folders(self):
+ for index, row in self.uci.table('FILES',drop_comments = False).iterrows():
+ file_path = self.uci_file.parent.joinpath(Path(row['FILENAME']))
+ assert file_path.parent.exists(),'File folder Specified in the UCI does not exist: ' + file_path.as_posix()
+
+
+
+ def validate_uci(self,run_model:bool = False):
  # Ensure wdm files exist and the folders for the other file types exist relative
  # to the uci path

@@ -63,15 +83,15 @@ class hspfModel():
  self.wdm_paths.append(file_path)
  elif file_path.suffix.lower() == '.hbn':
  assert file_path.parent.exists(),'File folder Specified in the UCI does not exist: ' + file_path.as_posix()
- #self.hbns[file_path.name.split('.')[0]] = None
- if file_path.exists():
- #self.hbns[file_path.name.split('.')[0]] = hbn.hbnClass(file_path)
- self.hbn_paths.append(file_path)
- else:
- self.run_model()
+ self.hbn_paths.append(file_path)
  else:
  assert file_path.parent.exists(),'File folder Specified in the UCI does not exist: ' + file_path.as_posix()

+ if (all(file_path.exists() for file_path in self.hbn_paths)) & (run_model == False):
+ pass
+ else:
+ self.run_model()
+
  def run_model(self,new_uci_file = None):

  if new_uci_file is None:
@@ -80,14 +100,14 @@ class hspfModel():
  # new_uci_file = self.model_path.joinpath(uci_name)
  # self.uci.write(new_uci_file)
  subprocess.run([self.winHSPF,self.uci_file.as_posix()]) #, stdout=subprocess.PIPE, creationflags=0x08000000)
- self.load_uci(new_uci_file)
+ self.load_uci(new_uci_file,run_model = False)

  def load_hbn(self,hbn_name):
  self.hbns[hbn_name] = hbn.hbnClass(self.uci_file.parent.joinpath(hbn_name).as_posix())

- def load_uci(self,uci_file):
+ def load_uci(self,uci_file,run_model:bool = False):
  self.uci = UCI(uci_file)
- self.validate_uci()
+ self.validate_uci(run_model = run_model)

  def convert_wdms(self):
  for wdm_file in self.wdm_paths:
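
A sketch of the new constructor flag, assuming a local model directory (the path is illustrative). With the default run_model=False the model is rerun only when a .hbn file listed in the UCI FILES block is missing; run_model=True forces a WinHspfLt run even when all outputs already exist:

    from hspf.hspfModel import hspfModel

    # Reuse existing binary output when every listed .hbn exists
    model = hspfModel('C:/models/basin/basin.uci')

    # Force a fresh simulation before loading the binary output
    model = hspfModel('C:/models/basin/basin.uci', run_model=True)
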
hspf/parser/graph.py CHANGED
@@ -4,10 +4,12 @@ Created on Thu Feb 6 14:50:45 2025

  @author: mfratki
  """
+
  import networkx as nx
  import pandas as pd
  import numpy as np
  import math
+ from itertools import chain

  class Node(object):
  nodes = []
@@ -17,44 +19,38 @@ class Node(object):

  def __str__(self):
  return self._label
-
- # class PerlndNode(Node):
- # raise NotImplementedError
-
- # class ReachNode(Node):
- # raise NotImplementedError
-
- # class ImplndNode(Node):
- # raise NotImplementedError
+

- # class SourceNode(Node):
- # raise NotImplementedError

- # class TargetNode(Node):
- # raise NotImplementedError
+ # G = nx.MultiDiGraph()
+ # reach_nodes = schematic[['TVOL','TVOLNO']].drop_duplicates().reset_index(drop=True).reset_index()
+ # nodes = schematic.loc[schematic['SVOL'].isin(['IMPLND','PERLND','GENER'])][['SVOL','SVOLNO']].reset_index(drop=True).reset_index()

- # class MetNode(Node):
- # raise NotImplementedError

+ # reach_nodes.rename(columns = {'index':'TNODE'},inplace=True)
+ # nodes.rename(columns = {'index':'SNODE','TVOL':'OPERATION','TVOLNO':'OPNID'},inplace=True)
+ # [G.add_node(row['TNODE'], id = row['TNODE'], category = 'OPERATION', type_id = row['TVOLNO'], type = row['RCHRES'] ) for node,label in reach_nodes.iterrows()]

+ # df = pd.merge(schematic,reach_nodes,right_on = ['TVOL','TVOLNO'],left_on = ['TVOL','TVOLNO']).reset_index()
+ # df.rename(columns = {'index':'SNODE'},inplace=True)

- # class wdmNode(Node):
- # raise NotImplementedError

+ # for index, row in df.iterrows():
+ # if row['SVOL'] == 'GENER':
+ # G.add_edge(row['SNODE'],row['TNODE'],
+ # mlno = row['MLNO'],
+ # count = row['AFACTR'],
+ # tmemsb1 = row['TMEMSB1'],
+ # tmemsb2 = row['TMEMSB2'])
+ # else:
+ # G.add_edge(row['SNODE'],row['TNODE'],
+ # mlno = row['MLNO'],
+ # area = row['AFACTR'],
+ # tmemsb1 = row['TMEMSB1'],
+ # tmemsb2 = row['TMEMSB2'])

- # # Add Parameter Nodes Add edges at same time since it's expensive to determine associated plern/implnd/reach node
- # keys = [key for key in uci.uci.keys() if key[0] in ['IMPLND','RCHRES','PERLND']]
- # for operation,table_name,table_id in keys:
- # parms = uci.table(operation,table_name,table_id)
- # for opnid, row in parms.iterrows():
- # target_node = graph.get_node(G,operation,opnid)
- # for parameter in row.index:
- # G.add_node(max(G.nodes) + 1, type = 'Parameter', value = row[parameter], name = parameter, operation = operation, table_name = table_name, table_id = table_id)
- # #labels[(operation,parameter,table_id)] = [max(G.nodes)]
- # G.add_edge(max(G.nodes), target_node)
+ # G = nx.from_pandas_edgelist(df,'SNODE','TNODE',edge_attr = True,edge_key = 'SNODE', create_using=nx.MultiDiGraph())

-
-
  def create_graph(uci):

@@ -82,9 +78,9 @@ def create_graph(uci):
  # Nodes in the schematic block that are missing from the opn sequence block (usually the outlet reach)
  #schematic.loc[schematic.index.map(labels).isna()]
  schematic = schematic.loc[schematic[['snode','tnode']].dropna().index] # For now remove that missing node
- schematic.loc[:,'TMEMSB1'].replace('',pd.NA,inplace=True)
- schematic.loc[:,'TMEMSB2'].replace('',pd.NA,inplace=True)
- schematic.loc[:,'MLNO'].replace('',pd.NA,inplace=True)
+ schematic.loc[:,'TMEMSB1'] = schematic['TMEMSB1'].replace('',pd.NA)
+ schematic.loc[:,'TMEMSB2'] = schematic['TMEMSB2'].replace('',pd.NA)
+ schematic.loc[:,'MLNO'] = schematic['MLNO'].replace('',pd.NA)

  schematic = schematic.astype({'snode': int,'tnode':int,'MLNO':pd.Int64Dtype(),'TMEMSB1':pd.Int64Dtype(),'TMEMSB2':pd.Int64Dtype()})
  for index, row in schematic.iterrows():
@@ -101,27 +97,6 @@ def create_graph(uci):
  tmemsb1 = row['TMEMSB1'],
  tmemsb2 = row['TMEMSB2'])

- # _ = [G.add_edge(row['snode'],row['tnode'],
- # mlno = row['MLNO'],
- # area = row['AFACTR'],
- # tmemsb1 = row['TMEMSB1'],
- # tmemsb2 = row['TMEMSB2']) for index, row in schematic.iterrows()]
-
-
-
- #Define edges from Ext Sources
- # ext_sources['snode'] = ext_sources.index.map(labels)
- # ext_sources.set_index(['TVOL','TOPFST'],inplace=True)
- # ext_sources['tnode'] = ext_sources.index.map(labels)
- # _ = [G.add_edge(row['snode'],row['tnode'],
- # smemn = row['SMEMN'],
- # smemsb = row['SMEMSB'],
- # mfactor = row['MFACTOR'],
- # tran = row['TRAN'],
- # tmemn = row['TMEMN'],
- # tmemsb1 = row['TMEMSB1'],
- # tmemsb2 = row['TMEMSB2']) for index, row in ext_sources.iterrows()]
-


  # Add property information
@@ -139,208 +114,10 @@ def create_graph(uci):
  G.nodes[labels[('RCHRES',index)]]['name'] = row['RCHID']
  G.nodes[labels[('RCHRES',index)]]['lkfg'] = row['LKFG']

-
- # # Add property information
- # bininfo = uci.table('PERLND','BINARY-INFO')
- # for index,row in geninfo.iterrows():
- # G.nodes[labels[('PERLND',index)]]['name'] = row['LSID']
-
- # bininfo = uci.table('IMPLND','BINARY-INFO')
- # for index,row in geninfo.iterrows():
- # G.nodes[labels[('IMPLND',index)]]['name'] = row['LSID']
-
- # bininfo = uci.table('RCHRES','BINARY-INFO')
- # for index,row in geninfo.iterrows():
- # G.nodes[labels[('RCHRES',index)]]['name'] = row['RCHID']
- # G.nodes[labels[('RCHRES',index)]]['lkfg'] = row['LKFG']
-
-
- # labels = {}
- # for n, d in G.nodes(data=True):
- # l = (d['operation'],d['opnid'])
- # labels[l] = labels.get(l, [])
- # labels[l].append(n)
-
-
-
  G.labels = labels
  return G


- # def create_subgraph(G,start_node):
- # sub_G = nx.MultDiGraph()
- # for n in G.successors_iter(start_node):
- # sub_G.add_path([start_node,n])
- # create_subgraph(G,sub_G,n)
-
- # Binary info
-
- """
- CREATE TABLE Operation (
- opn VARCHAR,
- opnid INTEGER,
- PRIMARY KEY (opn, opnid)
-
-
-
- )
-
-
-
-
- """
-
-
-
-
- """
- CREATE TABLE Files (
- ftype VARCHAR,
- unit INTEGER NOT NULL PRIMARY KEY,,
- filename VARCHAR
- );
-
- """
-
-
- """
- CREATE TABLE GenInfo (
- pk INTEGER NOT NULL,
- opn VARCHAR,
- opnid INTEGER,
- PRIMARY KEY (opn, opnid)
- iunits INTEGER,
- ounits INTEGER,
- punit1 INTEGER,
- punit2 INTEGER,
- BUNIT1 INTEGER,
- BUNIT2 INTEGER
- );
-
- """
-
- # # Files
- # files = uci.table('FILES')
- # files['FTYPE'] = files['FTYPE'].replace({'WDM': 'WDM1'})
- # dfs = []
-
- # # PerlndInfo
- # operation = 'PERLND'
- # geninfo = uci.table(operation,'GEN-INFO')
- # binaryinfo = uci.table(operation,'BINARY-INFO')
- # if operation == 'RCHRES':
- # geninfo = geninfo.rename(columns = {'RCHID':'LSID',
- # 'BUNITE':'BUNIT1',
- # 'BUNITM': 'BUNIT2',
- # 'PUNITE': 'PUNIT1',
- # 'PUNITM': 'PUNIT2'})
- # df = pd.merge(geninfo,binaryinfo, left_index = True, right_index = True, how = 'outer').reset_index()
- # df.insert(0,'OPN',operation)
- # df = pd.merge(df,files, left_on = 'BUNIT1', right_on = 'UNIT')
-
- # # Schematic Table
- # schematic = uci.table('SCHEMATIC')
-
- # # Masslink Table
- # masslinks = []
- # for table_name in uci.table_names('MASS-LINK'):
- # mlno = table_name.split('MASS-LINK')[1]
- # masslink = uci.table('MASS-LINK',table_name)
- # masslink.insert(0,'MLNO',mlno)
- # masslinks.append(masslink)
- # masslinks = pd.concat(masslinks)
-
- # #masslinks['QUALID'] = (masslinks['SMEMSB1'].str.strip().replace('','0').astype(int)-1).replace(-1,pd.NA)
-
-
-
- # hbn_name = uci.table('PERLND','QUAL-PROPS', int(row['SMEMSB1']) - 1).iloc[0]['QUALID']
-
- # operation = row['SVOL']
- # activity = row['SGRPN']
- # ts_name = row['SMEMN']
-
- # hbn_name = row['SMEMN'] + hbn_name
-
-
- # schematic = uci.table('SCHEMATIC')
- # schematic = pd.merge(schematic,masslinks,left_on = 'MLNO',right_on = 'MLNO')
-
- # all(schematic['SVOL_x'] == schematic['SVOL_y'])
- # all(schematic['TVOL_x'] == schematic['TVOL_y'])
- # schematic.loc[schematic['TMEMSB1_x'] == '', 'TMEMSB1_y'] = schematic['TMEMSB1_x']
- # schematic.loc[schematic['TMEMSB2_x'] == '', 'TMEMSB2_y'] = schematic['TMEMSB2_x']
-
- # schematic = schematic.drop(columns=['TMEMSB1_y','TMEMSB2_y','TVOL_y','SVOL_y'])
- # schematic = schematic.rename(columns = {'SVOL_x':'SVOL',
- # 'TVOL_x':'TVOL',
- # 'TMEMSB2_x':'TMEMSB2',
- # 'TMEMSB1_x':'TMEMSB1'})
-
-
-
- # # Watershed Weighted Mean
- # subwatersheds = uci.network.subwatersheds()
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND'].reset_index()
- # df = cal.model.hbns.get_multiple_timeseries('PERLND',5,'PERO',test['SVOLNO'].values).mean().reset_index()
- # df.columns = ['OPNID','value']
- # weighted_mean = df[['value','AFACTR']].groupby(df['LSID']).apply(lambda x: (x['value'] * x['AFACTR']).sum() / x['AFACTR'].sum())
- # weighted_mean.loc['combined'] = (df['value'] * df['AFACTR']).sum() / df['AFACTR'].sum()
-
-
- # # annual weighted timeseries watershed
- # reach_ids = [103,119,104,118]
- # subwatersheds = uci.network.subwatersheds().loc[reach_ids]
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND'].reset_index()
- # df = cal.model.hbns.get_multiple_timeseries('PERLND',5,'PERO',test['SVOLNO'].values).mean().reset_index()
- # df.columns = ['OPNID','value']
- # df = pd.merge(subwatersheds,df,left_on = 'SVOLNO', right_on='OPNID')
- # weighted_mean = (df['value'] * df['AFACTR']).sum() / df['AFACTR'].sum()
- # df[f'weighted_{ts_name}'] = df.groupby('LSID')[parameter].transform(lambda x: (x * df.loc[x.index, 'AFACTR']).sum() / df.loc[x.index, 'AFACTR'].sum())
- # weighted_mean.loc['combined'] = (df['value'] * df['AFACTR']).sum() / df['AFACTR'].sum()
-
-
-
- # # parameter average weighted by landcover area
- # table_name = 'PWAT-PARM2'
- # parameter = 'LZSN'
- # table_id = 0
- # operation = 'PERLND'
-
- # subwatersheds = uci.network.subwatersheds()
- # subwatersheds = subwatersheds.loc[subwatersheds['SVOL'] == 'PERLND'].reset_index()
- # df = uci.table(operation,table_name,table_id)[parameter].reset_index()
- # df.columns = ['OPNID',parameter]
- # df = pd.merge(subwatersheds,df,left_on = 'SVOLNO', right_on='OPNID')
- # df[f'weighted_{parameter}'] = df.groupby('TVOLNO')[parameter].transform(lambda x: (x * df.loc[x.index, 'AFACTR']).sum() / df.loc[x.index, 'AFACTR'].sum())
-
-
- # #df[f'weighted_{parameter}'] = df.groupby('LSID')[parameter].transform(lambda x: (x * df.loc[x.index, 'AFACTR']).sum() / df.loc[x.index, 'AFACTR'].sum())
-
-
-
-
- # extsources = uci.table('EXT SOURCES')
- # extsources['SVOL'] = extsources['SVOL'].replace({'WDM': 'WDM1'})
-
- # df = pd.merge(extsources,df,left_on = 'SVOL',right_on = 'FTYPE',how = 'right')
-
-
- # exttargets = uci.table('EXT TARGETS')
- # schematic = uci.table('SCHEMATIC')
-
-
- #%% Methods using universal node id
-
- # def bypass_node(G, node):
- # preds = list(G.predecessors(node))
- # succs = list(G.successors(node))
- # for u in preds:
- # for v in succs:
- # if
- # G.add_edge(u, v)
- # G.remove_node(node)
-
  def _add_subgraph_labels(G,G_sub):
  G_sub.labels = {label:node for label, node in G.labels.items() if node in G_sub.nodes}
  return G_sub
@@ -424,18 +201,29 @@ def nodes(G,node_type,node_type_id,adjacent_node_type):

  #%% Methods using node_type, node_type_id interface

- def upstream_network(G,reach_id):
- return G.subgraph(nx.ancestors(G,get_node_id(G,'RCHRES',reach_id))).copy()
+ def upstream_network(G,reach_ids):
+ node_ids = [get_node_id(G,'RCHRES',reach_id) for reach_id in reach_ids]
+ # Initialize an empty set to store all unique ancestors
+
+ all_ancestors = set()
+ # Iterate through the target nodes and find ancestors for each
+ for node_id in node_ids:
+ ancestors_of_node = nx.ancestors(G, node_id)
+ all_ancestors.update(ancestors_of_node) # Add ancestors to the combined set
+
+ all_ancestors.update(node_ids) # Include the target nodes themselves
+ return G.subgraph(all_ancestors).copy()
+ #return G.subgraph([node_id] + list(nx.ancestors(G,node_id))).copy()

  def downstream_network(G,reach_id):
- return G.subgraph(nx.descendants(G,get_node_id(G,'RCHRES',reach_id))).copy()
+ node_id = get_node_id(G,'RCHRES',reach_id)
+ return G.subgraph([node_id] + list(nx.descendants(G,node_id))).copy()

- def subset_network(G,reach_id,upstream_reach_ids = None):
- G = upstream_network(G,reach_id)
+ def subset_network(G,reach_ids,upstream_reach_ids = None):
+ G = upstream_network(G,reach_ids)
  if upstream_reach_ids is not None:
- [G.remove_nodes_from(nx.ancestors(G,upstream_reach_id)) for upstream_reach_id in upstream_reach_ids if upstream_reach_id in G.nodes]
- [G.remove_nodes_from([upstream_reach_id]) for upstream_reach_id in upstream_reach_ids if upstream_reach_id in G.nodes]
- #assert([len(sinks(G)) == 0,sinks(G)[0] == reach_id])
+ G.remove_nodes_from(get_node_ids(upstream_network(G,upstream_reach_ids),'RCHRES'))
+ #assert([len(sinks(G)) == 0,sinks(G)[0] == reach_id])
  return G

  def upstream_nodes(G,reach_id,upstream_node_type):
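
The multi-outlet upstream_network above reduces to a union of nx.ancestors calls over the target nodes. A self-contained sketch of that pattern on a toy graph (node ids are arbitrary, not model reach ids):

    import networkx as nx

    G = nx.MultiDiGraph()
    G.add_edges_from([(1, 3), (2, 3), (3, 5), (4, 5), (6, 7)])

    targets = [5, 7]                 # stand-ins for the RCHRES node ids
    upstream = set(targets)          # include the target nodes themselves
    for node_id in targets:
        upstream |= nx.ancestors(G, node_id)

    sub = G.subgraph(upstream).copy()
    sorted(sub.nodes)                # [1, 2, 3, 4, 5, 6, 7]
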
@@ -512,8 +300,8 @@ def routing_reachs(G):
  def is_routing(G,reach_id):
  return all([node['type'] not in ['PERLND', 'IMPLND'] for node in adjacent_nodes(G,reach_id)])

- def watershed_area(G,reach_ids):
- return float(np.nansum(list(nx.get_edge_attributes(make_watershed(G,reach_ids),'area').values())))
+ def watershed_area(G,reach_ids,upstream_reach_ids = None):
+ return float(np.nansum(list(nx.get_edge_attributes(make_watershed(G,reach_ids,upstream_reach_ids),'area').values())))

  def catchment_area(G,reach_id):
  return float(np.nansum(list(nx.get_edge_attributes(make_catchment(G,reach_id),'area').values())))
553
341
  nx.set_node_attributes(catchment,node_id,'catchment_id')
554
342
  return catchment
555
343
 
556
- from itertools import chain
557
-
558
- def make_watershed(G,reach_ids):
344
+ def make_watershed(G,reach_ids,upstream_reach_ids = None):
559
345
  '''
560
346
  Creates a sugraph representing the the catchments upstream of the specified hspf model reaches. Note that a negative reach_ids indicate to subtract that area from the total.
561
347
 
562
348
 
563
349
  '''
564
- node_ids = set([get_node_id(G,'RCHRES',reach_id) for reach_id in reach_ids if reach_id > 0])
565
- nodes_to_exclude = set([get_node_id(G,'RCHRES',abs(reach_id)) for reach_id in reach_ids if reach_id < 0])
566
- node_ids = node_ids - nodes_to_exclude
350
+
351
+ node_ids = set(get_node_id(G,'RCHRES',reach_id) for reach_id in reach_ids)
352
+
353
+ # Initialize an empty set to store all unique ancestors
567
354
 
355
+ # Iterate through the target nodes and find ancestors for each
356
+ all_upstream_reaches = set()
357
+ for node_id in node_ids:
358
+ ancestors_of_node = [node['id'] for node in ancestors(G, node_id,'RCHRES')]
359
+ all_upstream_reaches.update(ancestors_of_node) # Add ancestors to the combined set
360
+ all_upstream_reaches.update(node_ids) # Include the target nodes themselves
361
+
362
+ if upstream_reach_ids is not None:
363
+ upstream_node_ids = set(get_node_id(G,'RCHRES',reach_id) for reach_id in upstream_reach_ids)
364
+ for node_id in upstream_node_ids:
365
+ ancestors_of_node = [node['id'] for node in ancestors(G, node_id,'RCHRES')]
366
+ all_upstream_reaches = all_upstream_reaches - set(ancestors_of_node)
367
+ else:
368
+ upstream_node_ids = set()
369
+
370
+ nodes = set(chain.from_iterable([list(G.predecessors(node_id)) for node_id in all_upstream_reaches])) | node_ids
371
+ nodes = nodes - upstream_node_ids # Include the target nodes themselves
372
+
373
+
374
+ return G.subgraph(nodes).copy()
375
+
376
+
377
+ # node_ids = set([get_node_id(G,'RCHRES',reach_id) for reach_id in reach_ids if reach_id > 0])
378
+ # nodes_to_exclude = set([get_node_id(G,'RCHRES',abs(reach_id)) for reach_id in reach_ids if reach_id < 0])
379
+ # node_ids = node_ids - nodes_to_exclude
568
380
 
569
- nodes = [list(nx.ancestors(G,node_id)) for node_id in node_ids]
570
- nodes.append(node_ids)
571
- nodes = list(set(chain.from_iterable(nodes)))
381
+ #nodes = get_opnids(G,'RCHRES',reach_ids,upstream_reach_ids) #[ancestors(G,node_id,'RCHRES')) for node_id in node_ids]
382
+ nodes = subset_network(G,reach_ids,upstream_reach_ids)
383
+ #nodes.append(node_ids)
384
+ #nodes = list(set(chain.from_iterable(nodes)))
572
385
  watershed = subgraph(G, nodes)
573
386
  catchment_id = '_'.join([str(reach_id) for reach_id in reach_ids])
574
387
  nx.set_node_attributes(watershed,node_ids,catchment_id)
@@ -654,8 +467,17 @@ class Catchment():
  def dsn(self,tmemn):
  return [self.catchment.nodes[k[0]]['id'] for k,v in nx.get_edge_attributes(self.catchment,'tmemn').items() if v == tmemn]

- def to_dataframe():
- return
+ def to_dataframe(self):
+ edges = []
+ for u, v, edge_data in self.catchment.edges(data=True):
+ source_node_attributes = self.catchment.nodes[u]
+ # Add or update edge attributes with source node attributes
+ edge_data["source_type"] = source_node_attributes.get("type")
+ edge_data["source_name"] = source_node_attributes.get("name")
+ edge_data["source_type_id"] = source_node_attributes.get("type_id")
+ edges.append(edge_data)
+
+ return pd.DataFrame(edges)
  # def _watershed(G,reach_id):

  # predecessors = (list(G.predecessors(node)))
@@ -676,7 +498,17 @@ class Catchment():

  # {source:[node for node in nx.shortest_path(G,source,reach_id)] for source in nx.ancestors(G,reach_id)}

-
+ def to_dataframe(G):
+ edges = []
+ for u, v, edge_data in G.edges(data=True):
+ source_node_attributes = G.nodes[u]
+ # Add or update edge attributes with source node attributes
+ edge_data["source_type"] = source_node_attributes.get("type")
+ edge_data["source_name"] = source_node_attributes.get("name")
+ edge_data["source_type_id"] = source_node_attributes.get("type_id")
+ edges.append(edge_data)
+
+ return pd.DataFrame(edges)


  #%% Legacy Methods for Backwards compatibility
@@ -710,8 +542,16 @@ class reachNetwork():
  downstream.insert(0,reach_id)
  return downstream

- def calibration_order(self,reach_id,upstream_reach_ids = None):
- return calibration_order(self.G,reach_id,upstream_reach_ids)
+ def calibration_order(self,reach_ids,upstream_reach_ids = None):
+ '''
+ Calibration order of reaches to prevent upstream influences. Equivalent to iteratively pruning the network, removing nodes with no upstream connections.
+ A list of lists is returned where each sublist contains reaches that can be calibrated in parallel.
+
+ :param reach_ids: Target reach ids
+ :param upstream_reach_ids: Reach ids whose upstream networks are excluded
+ '''
+ return calibration_order(make_watershed(self.G,reach_ids,upstream_reach_ids))

  def station_order(self,reach_ids):
  raise NotImplementedError()
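
The pruning the docstring describes is a Kahn-style peel of in-degree-zero nodes; each peeled layer can be calibrated in parallel. A self-contained sketch on a toy reach graph (ids are illustrative):

    import networkx as nx

    # Reaches 1 and 2 drain to 3; 3 and 4 drain to 5
    G = nx.DiGraph([(1, 3), (2, 3), (3, 5), (4, 5)])

    order = []
    nodes = set(G.nodes)
    while nodes:
        headwaters = [n for n in nodes if G.in_degree(n) == 0]
        order.append(sorted(headwaters))   # calibrate these in parallel
        nodes -= set(headwaters)
        G.remove_nodes_from(headwaters)

    order                                   # [[1, 2, 4], [3], [5]]
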
@@ -731,30 +571,30 @@ class reachNetwork():
  '''
  return [node['type_id'] for node in predecessors(self.G,'RCHRES',get_node_id(self.G,'RCHRES',reach_id))]

- def get_opnids(self,operation,reach_id, upstream_reach_ids = None):
+ def get_opnids(self,operation,reach_ids, upstream_reach_ids = None):
  '''
  Operation IDs with a path to reach_id. Operations upstream of upstream_reach_ids will not be included

  '''
- return get_opnids(self.G,operation=operation,reach_id = reach_id, upstream_reach_ids = upstream_reach_ids)
-
+ return get_opnids(self.G,operation,reach_ids,upstream_reach_ids)
  def operation_area(self,operation,opnids = None):
+ '''
+ Area of operation type for specified operation IDs. If None returns all operation areas.
+ Equivalent to the schematic table filtered by operation and opnids.
+ '''
+
  return operation_area(self.uci,operation)

  def drainage(self,reach_id):
- # Merge source node attributes into edge attributes
-
- edges = []
- for u, v, edge_data in make_catchment(self.G,reach_id).edges(data=True):
- source_node_attributes = self.G.nodes[u]
- # Add or update edge attributes with source node attributes
- edge_data["source_type"] = source_node_attributes.get("type")
- edge_data["source_name"] = source_node_attributes.get("name")
- edge_data["source_type_id"] = source_node_attributes.get("type_id")
- edges.append(edge_data)
+ '''
+ Edge table for the catchment draining directly to reach_id, with source node attributes merged in.

- return pd.DataFrame(edges)
-
+ :param reach_id: Target reach id
+ '''
+ # Merge source node attributes into edge attributes
+ return to_dataframe(make_catchment(self.G,reach_id))
+
  def subwatersheds(self,reach_ids = None):
  df = subwatersheds(self.uci)
  if reach_ids is None:
@@ -773,15 +613,16 @@ class reachNetwork():
  def reach_contributions(self,operation,opnids):
  return reach_contributions(self.uci,operation,opnids)

- def drainage_area(self,reach_ids):
- return watershed_area(self.G,reach_ids)
+ def drainage_area(self,reach_ids,upstream_reach_ids = None):
+ return watershed_area(self.G,reach_ids,upstream_reach_ids)

- def drainage_area_landcover(self,reach_id,group = True):
- reach_ids = self._upstream(reach_id)
- areas = pd.concat([self.subwatershed(reach_id) for reach_id in reach_ids]).groupby(['SVOL','SVOLNO'])['AFACTR'].sum()
-
- if group:
- areas = pd.concat([areas[operation].groupby(self.uci.opnid_dict[operation].loc[areas[operation].index,'LSID'].values).sum() for operation in ['PERLND','IMPLND']])
+ def drainage_area_landcover(self,reach_ids,upstream_reach_ids = None, group = True):
+ areas = to_dataframe(make_watershed(self.G,reach_ids,upstream_reach_ids))
+ areas = areas.groupby(['source_type','source_type_id','source_name'])['area'].sum()[['PERLND','IMPLND']]
+
+ if group:
+ areas = pd.concat([areas[operation].groupby('source_name').sum() for operation in ['PERLND','IMPLND']])
+ #areas = pd.concat([areas[operation].groupby(self.uci.opnid_dict[operation].loc[areas[operation].index,'LSID'].values).sum() for operation in ['PERLND','IMPLND']])
  return areas

  def outlets(self):
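
The reworked drainage_area_landcover sums edge areas from that watershed edge table. A pandas sketch of the same grouping on made-up rows (column names follow the to_dataframe output above; values are illustrative):

    import pandas as pd

    areas = pd.DataFrame({
        'source_type':    ['PERLND', 'PERLND', 'IMPLND', 'RCHRES'],
        'source_type_id': [101, 102, 201, 5],
        'source_name':    ['Forest', 'Forest', 'Urban', 'Main Stem'],
        'area':           [120.0, 80.0, 15.0, float('nan')],
    })

    grouped = areas.groupby(['source_type', 'source_type_id', 'source_name'])['area'].sum()
    land = grouped[['PERLND', 'IMPLND']]       # drop routing reaches
    land.groupby('source_name').sum()          # Forest 200.0, Urban 15.0
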
@@ -799,48 +640,28 @@ class reachNetwork():
  def paths(self,reach_id):
  return paths(self.G,reach_id)

-
- def calibration_order(G,reach_id,upstream_reach_ids = None):
+
+ def get_opnids(G,operation,reach_ids, upstream_reach_ids = None):
+ return get_node_type_ids(make_watershed(G,reach_ids,upstream_reach_ids),operation)
+
+
+ def calibration_order(G):
  '''
- Determines the order in which the specified reaches should be calibrated to
+ Determines the order in which the model reaches should be calibrated to
  prevent upstream influences. Primarily helpful when calibrating sediment and
  adjusting in-channel erosion rates.
  '''

+ nodes = get_node_ids(G,'RCHRES')
+ G = G.subgraph(nodes).copy()
  order = []
- Gsub = subgraph(G,get_node_ids(G,'RCHRES'))
- while(len(Gsub.nodes)) > 0:
-
- nodes_to_remove = [node for node, in_degree in Gsub.in_degree() if in_degree == 0]
+ while(len(nodes)) > 0:
+ nodes_to_remove = [node for node in nodes if G.in_degree(node) == 0]
  order.append([G.nodes[node]['type_id'] for node in nodes_to_remove])
- Gsub.remove_nodes_from(nodes_to_remove)
+ nodes = [node for node in nodes if node not in nodes_to_remove]
+ G.remove_nodes_from(nodes_to_remove)
  return order
-
-
-
-
- def get_opnids(G,operation,reach_id = None, upstream_reach_ids = None):
- G = subset_network(G,reach_id,upstream_reach_ids)
- perlnds = [node['type_id'] for node in get_nodes(G,'PERLND')]
- implnds = [node['type_id'] for node in get_nodes(G,'IMPLND')]
- reachs = [node['type_id'] for node in get_nodes(G,'RCHRES')]
- return {'RCHRES':reachs,'PERLND':perlnds,'IMPLND':implnds}[operation]
- #return reachs,perlnds,implnds
-
- def drainage(uci,reach_ids):
- return subwatersheds(uci).loc[reach_ids].reset_index()[['SVOL','LSID','AFACTR']].groupby(['LSID','SVOL']).sum()
-
-
-
- def drainage_area(uci,reach_ids,drng_area = 0):
- if len(reach_ids) == 0:
- return drng_area
- else:
- sign = math.copysign(1,reach_ids[0])
- reach_id = int(reach_ids[0]*sign)
- drng_area = drng_area + sign*uci.network.drainage_area(reach_id)
- drainage_area(uci,reach_ids[1:],drng_area)
-
+

  def reach_contributions(uci,operation,opnids):
  schematic = uci.table('SCHEMATIC').set_index('SVOL')
hspf/parser/parsers.py CHANGED
@@ -6,6 +6,7 @@ Created on Fri Oct 7 12:13:23 2022
  """

  from abc import abstractmethod
+ from multiprocessing.util import info
  import numpy as np
  import pandas as pd
  from pathlib import Path
@@ -52,7 +53,10 @@ class Table():

  self.parser = parserSelector[self.block]
  #self.updater = Updater
-
+
+ def _delimiters(self):
+ return delimiters(self.block,self.name)
+
  def parse(self):
  self.data = self.parser.parse(self.block,self.name,self.lines)

@@ -286,7 +290,29 @@ class masslinkParser(Parser):
  table_lines[index] = line[-1]

  return table_lines
-
+
+ class globalParser(Parser):
+ def parse(block,table_name,table_lines):
+ table_lines = [line for line in table_lines if '***' not in line]
+ data = {
+ 'description' : table_lines[0].strip(),
+ 'start_date' : table_lines[1].split('END')[0].split()[1],
+ 'start_hour' : int(table_lines[1].split('END')[0].split()[2][:2])-1,
+ 'end_date' : table_lines[1].strip().split('END')[1].split()[0],
+ 'end_hour' : int(table_lines[1].strip().split('END')[1].split()[1][:2])-1,
+ 'echo_flag1' : int(table_lines[2].split()[-2]),
+ 'echo_flag2' : int(table_lines[3].split()[-1]),
+ 'units_flag' : int(table_lines[3].split()[5]),
+ 'resume_flag': int(table_lines[3].split()[1]),
+ 'run_flag': int(table_lines[3].split()[3])
+ }
+ df = pd.DataFrame([data])
+ df['comments'] = ''
+ return df
+
+ def write(block,table_name,table):
+ raise NotImplementedError()
+
  class specactionsParser(Parser):
  def parse(block,table,lines):
  raise NotImplementedError()
@@ -301,7 +327,7 @@ class externalsourcesParser():
  def write(block,table,lines):
  raise NotImplementedError()

- parserSelector = {'GLOBAL':defaultParser,
+ parserSelector = {'GLOBAL':globalParser,
  'FILES':standardParser,
  'OPN SEQUENCE':opnsequenceParser,
  'PERLND':operationsParser,
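
For reference, globalParser consumes the payload lines of a UCI GLOBAL block after comment lines containing '***' are filtered out. A sketch against a made-up block, replaying only the split logic that is easy to verify (the date and flag fields):

    # Hypothetical GLOBAL block payload, comments already stripped
    table_lines = [
        '  Calibration run, Basin Creek',
        '  START       1996/01/01 00:00  END    2009/12/31 24:00',
        '  RUN INTERP OUTPT LEVELS    1    0',
        '  RESUME     0 RUN     1                   UNIT SYSTEM     1',
    ]

    description = table_lines[0].strip()                          # 'Calibration run, Basin Creek'
    start_date = table_lines[1].split('END')[0].split()[1]        # '1996/01/01'
    end_date = table_lines[1].strip().split('END')[1].split()[0]  # '2009/12/31'
    echo_flag1 = int(table_lines[2].split()[-2])                  # 1
    run_flag = int(table_lines[3].split()[3])                     # 1
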
hspf/reports.py CHANGED
@@ -325,6 +325,7 @@ def ann_avg_subwatershed_loading(constituent,uci,hbn):
  return df

  def ann_avg_watershed_loading(constituent,reach_ids,uci,hbn, by_landcover = False):
+ reach_ids = [item for sublist in [uci.network._upstream(reach_id) for reach_id in reach_ids] for item in sublist]
  df = ann_avg_constituent_loading(constituent,uci,hbn)
  df = df.loc[df['TVOLNO'].isin(reach_ids)]
  if by_landcover:
hspf/uci.py CHANGED
@@ -8,6 +8,7 @@ Created on Mon Jul 11 08:39:57 2022

  #lines = reader('C:/Users/mfratki/Documents/Projects/LacQuiParle/ucis/LacQuiParle_0.uci')
  import subprocess
+ import sys
  import numpy as np
  import pandas as pd
  from .parser.parsers import Table
@@ -78,7 +79,7 @@ class UCI():

  def table(self,block,table_name = 'na',table_id = 0,drop_comments = True):
  # Dynamic parsing of tables when called by user
- assert block in ['FILES','PERLND','IMPLND','RCHRES','SCHEMATIC','OPN SEQUENCE','MASS-LINK','EXT SOURCES','NETWORK','GENER','MONTH-DATA','EXT TARGETS','COPY','FTABLES']
+ assert block in ['GLOBAL','FILES','PERLND','IMPLND','RCHRES','SCHEMATIC','OPN SEQUENCE','MASS-LINK','EXT SOURCES','NETWORK','GENER','MONTH-DATA','EXT TARGETS','COPY','FTABLES']

  table = self.uci[(block,table_name,table_id)] #[block][table_name][table_id]
  #TODO move the format_opnids into the Table class?
@@ -103,7 +104,7 @@ class UCI():
  self.uci[(block,table_name,table_id)].replace(table)

  def table_lines(self,block,table_name = 'na',table_id = 0):
- return self.uci[(block,table_name,table_id)].lines
+ return self.uci[(block,table_name,table_id)].lines.copy()

  def comments(block,table_name = None,table_id = 0): # comments of a table
  raise NotImplementedError()
@@ -176,13 +177,80 @@ class UCI():
  lines += ['']
  lines += ['END RUN']
  self.lines = lines
+
+ def set_simulation_period(self,start_year,end_year):
+ # Update the GLOBAL table with new start and end dates; a janky implementation, but not a priority.
+
+ # if start_hour < 10:
+ # start_hour = f'0{int(start_hour+1)}:00'
+ # else:
+ # start_hour = f'{int(start_hour+1)}:00'

-
+ # if end_hour < 10:
+ # end_hour = f'0{int(end_hour+1)}:00'
+ # else:
+ # end_hour = f'{int(end_hour+1)}:00'
+
+ table_lines = self.table_lines('GLOBAL')
+ for index, line in enumerate(table_lines):
+ if '***' in line: #in case there are comments in the global block
+ continue
+ elif line.strip().startswith('START'):
+ table_lines[index] = line[0:14] + f'{start_year}/01/01 00:00 ' + f'END {end_year}/12/31 24:00'
+ else:
+ continue
+
+ self.uci[('GLOBAL','na',0)].lines = table_lines
+
+ def set_echo_flags(self,flag1,flag2):
+ table_lines = self.table_lines('GLOBAL')
+ for index, line in enumerate(table_lines):
+ if '***' in line: #in case there are comments in the global block
+ continue
+ elif line.strip().startswith('RUN INTERP OUTPT LEVELS'):
+ table_lines[index] = f' RUN INTERP OUTPT LEVELS {flag1} {flag2}'
+ else:
+ continue
+
+
+ self.uci[('GLOBAL','na',0)].lines = table_lines
+
+
+ def _write(self,filepath):
+ with open(filepath, 'w') as the_file:
+ for line in self.lines:
+ the_file.write(line+'\n')
+
+ def add_parameter_template(self,block,table_name,table_id,parameter,tpl_char = '~'):
+
+ table = self.table(block,table_name,0,False).reset_index()
+ column_names,dtypes,starts,stops = self.uci[(block,table_name,table_id)]._delimiters()
+
+ width = stops[column_names.index(parameter)] - starts[column_names.index(parameter)]
+
+ ids = ~table[parameter].isna() # Handle comment lines in uci
+
+ # Replace parameter name with PEST/PEST++ specification. Note this does not use the HSPF supplemental file, so parameters are limited to the width of the uci file column
+ pest_param = tpl_char + parameter.lower() + table.loc[ids,'OPNID'].astype(str)
+ pest_param = pest_param.apply(lambda name: name + ' '*(width-len(name)-1)+ tpl_char)
+
+ table.loc[ids,parameter] = pest_param
+ table = table.set_index('OPNID')
+ self.replace_table(table,block,table_name,table_id)
+
+ def write_tpl(self,tpl_char = '~',new_tpl_path = None):
+ if new_tpl_path is None:
+ new_tpl_path = self.filepath.parent.joinpath(self.filepath.stem + '.tpl')
+ self.merge_lines()
+ self.lines.insert(0,tpl_char)
+ self._write(new_tpl_path)
+
  def write(self,new_uci_path):
  self.merge_lines()
- with open(new_uci_path, 'w') as the_file:
- for line in self.lines:
- the_file.write(line+'\n')
+ self._write(new_uci_path)
+
+ def _run(self,wait_for_completion=True):
+ run_model(self.filepath, wait_for_completion=wait_for_completion)

  def update_bino(self,name):
  #TODO: Move up to business/presentation layer
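
How these helpers might be chained when staging a PEST/PEST++ run; the path, table, and parameter names are illustrative, and this assumes the UCI object keeps its source path as filepath, as write_tpl implies:

    from hspf.uci import UCI

    uci = UCI('C:/models/basin/basin.uci')       # hypothetical model

    # Rewrite the GLOBAL START/END line to span 1996-2009
    uci.set_simulation_period(1996, 2009)

    # Mark PERLND LZSN as PEST-adjustable, then emit basin.tpl with the
    # leading '~' template marker prepended
    uci.add_parameter_template('PERLND', 'PWAT-PARM2', 0, 'LZSN')
    uci.write_tpl()
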
@@ -298,9 +366,25 @@ class UCI():

  #TODO: More convenience methods that should probably be in a separate module

- def run_model(uci_file):
- winHSPF = str(Path(__file__).resolve().parent.parent) + '\\bin\\WinHSPFLt\\WinHspfLt.exe'
- subprocess.run([winHSPF,uci_file.as_posix()]) #, stdout=subprocess.PIPE, creationflags=0x08000000)
+ def run_model(uci_file, wait_for_completion=True):
+ winHSPF = str(Path(__file__).resolve().parent.parent) + '\\bin\\WinHSPFlt\\WinHspfLt.exe'
+
+ # Arguments for the subprocess
+ args = [winHSPF, uci_file.as_posix()]
+
+ if wait_for_completion:
+ # Use subprocess.run to wait for the process to complete (original behavior)
+ subprocess.run(args)
+ else:
+ # Use subprocess.Popen to run the process in the background without waiting
+ # On Windows, you can use creationflags to prevent a console window from appearing
+ if sys.platform.startswith('win'):
+ # Use a variable for the flag to ensure it's only used on Windows
+ creationflags = subprocess.CREATE_NO_WINDOW
+ subprocess.Popen(args, creationflags=creationflags)
+ else:
+ # For other platforms (like Linux/macOS), Popen without special flags works fine
+ subprocess.Popen(args)

  def get_filepaths(uci,file_extension):
  files = uci.table('FILES')
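
A sketch of the blocking and background modes, assuming the UCI object exposes its source path as filepath (which _run relies on); the path is illustrative:

    from hspf.uci import UCI

    uci = UCI('C:/models/basin/basin.uci')

    # Blocking (default): returns once WinHspfLt exits
    uci._run()

    # Fire-and-forget: Popen launches WinHspfLt in the background,
    # with CREATE_NO_WINDOW suppressing the console window on Windows
    uci._run(wait_for_completion=False)
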
hspf-2.0.2.dist-info/METADATA → hspf-2.1.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hspf
- Version: 2.0.2
+ Version: 2.1.0
  Summary: Python package for downloading and running HSPF models
  Project-URL: Homepage, https://github.com/mfratkin1/pyHSPF
  Author-email: Mulu Fratkin <michael.fratkin@state.mn.us>
hspf-2.0.2.dist-info/RECORD → hspf-2.1.0.dist-info/RECORD RENAMED
@@ -1,11 +1,12 @@
  hspf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- hspf/hbn.py,sha256=SQMxWllZy5OxWGMkhmjiardb8vbSjrmENJrorLBqTDI,19476
+ hspf/hbn.py,sha256=X-xFTrJ9Z7rM_spMaZxyUIXisUKEm36Gq0gCG-mYuSY,18982
  hspf/helpers.py,sha256=djKc12ZZkJmB_cHEbFm-mk8sp4GAbBNfjXxfp7YAELU,3132
- hspf/hspfModel.py,sha256=8XFPd89niSn9bNTjB2UUpoLNAs6wsD6i6Lb9YKoYjUU,8090
- hspf/reports.py,sha256=DfS9DoNwrnD3UvxO879i-bM2gWh5QUMxrV4mdRDgpfE,51878
- hspf/uci.py,sha256=kb05yc_pmO7w8e8tDOxD7Ln0PxhYALmU8yeoQZHRw9g,30323
+ hspf/hspfModel.py,sha256=K_xF7HtuMpDMod56Z3IXDCeGsnUi8KGhly_9tm-mxoY,9070
+ hspf/reports.py,sha256=ALAeGP0KYdsFUnzAV5BZ784NRDxKgn42GKZpyq3E4xU,51997
+ hspf/uci.py,sha256=rsi_KJqdfBFp0rlKCHyhmQGdB_rgNE8k6abTjH26UqE,33982
  hspf/wdm.py,sha256=q0hNqsMNrTkxHtKEX0q0wWlIZabXv6UX2HjNCF9WEW4,12734
  hspf/wdmReader.py,sha256=-akKWB9SpUzXvXoQMeHLZNi_u584KaeEOyHB-YolTWM,22848
+ hspf/bin/WinHSPFLt/WinHspfLt.exe,sha256=Afs_nJ62r1VnTL2P4XfiRJ1sH2If5DeGTbcCzoqlanE,74752
  hspf/data/ParseTable.csv,sha256=ExqUaZg_uUPF5XHGLJEk5_jadnDenKjbwqC4d-iNX_M,193609
  hspf/data/Timeseries Catalog/IMPLND/IQUAL.txt,sha256=r36wt2gYtHKr5SkOcVnpyk5aYZF743AgkJ5o7CvHlIc,1000
  hspf/data/Timeseries Catalog/IMPLND/IWATER.txt,sha256=JZ03DFMq8e3EcflRSQ_BPYIeKe8TH3WYEUMmTF2OQEs,743
@@ -27,8 +28,8 @@ hspf/data/Timeseries Catalog/RCHRES/OXRX.txt,sha256=NWdRFpJ60LsYzCGHjt8Llay3OI8j
  hspf/data/Timeseries Catalog/RCHRES/PLANK.txt,sha256=0MAehIrF8leYQt0Po-9h6IiujzoWOlw-ADCV-bPiqs0,3508
  hspf/data/Timeseries Catalog/RCHRES/SEDTRN.txt,sha256=SiTgD4_YWctTgEfhoMymZfv8ay74xzCRdnI005dXjyE,659
  hspf/parser/__init__.py,sha256=2HvprGVCaJ9L-egvTj1MI-bekq5CNjtSBZfrCtQi3fs,92
- hspf/parser/graph.py,sha256=zjIhr-6y8Iy4jJM-V__aP7Q3zkRzr8PajuHjgqy3cBk,37085
- hspf/parser/parsers.py,sha256=Uk7RD_9aAX4TTjUOyPfwdURPdmE2M2_YItny2PedGi4,19717
- hspf-2.0.2.dist-info/METADATA,sha256=EOwcX8KhToElTZ44rqHbD7mD53d7Nx33ttyHcyZckQc,605
- hspf-2.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- hspf-2.0.2.dist-info/RECORD,,
+ hspf/parser/graph.py,sha256=AQkimUP232P3STLFVN2sfrpwiEdGvcc-244TQb8RIgs,32595
+ hspf/parser/parsers.py,sha256=x3othxQogUmGNe_ctCU20atDrRM_B4lEbVJb3EMbwto,20850
+ hspf-2.1.0.dist-info/METADATA,sha256=SHh9Lng8KdpN2vjaFZe5dFeFI5RsxypQEIrhGlu8G6s,605
+ hspf-2.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ hspf-2.1.0.dist-info/RECORD,,
hspf-2.0.2.dist-info/WHEEL → hspf-2.1.0.dist-info/WHEEL RENAMED
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: hatchling 1.27.0
+ Generator: hatchling 1.28.0
  Root-Is-Purelib: true
  Tag: py3-none-any