hspf 2.1.2.tar.gz → 2.1.3.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hspf-2.1.2 → hspf-2.1.3}/.gitignore +2 -2
- {hspf-2.1.2 → hspf-2.1.3}/PKG-INFO +1 -1
- {hspf-2.1.2 → hspf-2.1.3}/pyproject.toml +1 -1
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/hbn.py +149 -27
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/hspfModel.py +47 -8
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/parser/graph.py +2 -1
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/reports.py +320 -132
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/uci.py +16 -5
- hspf-2.1.3/tests/data/CLWM.ech +0 -0
- hspf-2.1.3/tests/data/Clearwater.log +4 -0
- {hspf-2.1.2 → hspf-2.1.3}/tests/data/Clearwater.uci +1 -1
- hspf-2.1.3/tests/data/clwtr.out +0 -0
- hspf-2.1.3/tests/test.tpl +35805 -0
- hspf-2.1.3/tests/test_reports.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/.gitattributes +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/BigFork.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/MANIFEST.in +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/setup.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/Masslink_Timeseries.csv +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/__init__.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/ATCoRend.dbf +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/ATCoUnits.mdb +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/ERROR.FIL +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/LF90.EER +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/LF90WIOD.DLL +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/MapWinUtility.dll +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/StatusMonitor.exe +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/WinHspfLt.exe +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/hass_ent.dll +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/hspfmsg.wdm +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/bin/WinHSPFLt/hspfmsg.wdu +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/build_warehouse.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/HSPFParameterRanges.csv +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/LandUseNames_Mappings.csv +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/ParseTable.csv +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/IMPLND/IQUAL.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/IMPLND/IWATER.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/IMPLND/IWTGAS.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/IMPLND/SOLIDS.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/PERLND/MSTLAY.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/PERLND/PQUAL.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/PERLND/PSTEMP.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/PERLND/PWATER.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/PERLND/PWATGAS.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/PERLND/SEDMNT.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/PERLND/SNOW.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/RCHRES/CONS.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/RCHRES/GQUAL.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/RCHRES/HTRCH.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/RCHRES/HYDR.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/RCHRES/NUTRX.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/RCHRES/OXRX.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/RCHRES/PLANK.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/data/Timeseries Catalog/RCHRES/SEDTRN.txt +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/hbn2.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/hbn_cy.c +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/hbn_cy.html +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/hbn_cy.pyx +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/helpers.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/parser/__init__.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/parser/parsers.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/validations.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/warehouse.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/wdm.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/src/hspf/wdmReader.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/tests/__init__.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/tests/data/Clearwater.tpl +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/tests/test_graph.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/tests/test_hbn.py +0 -0
- {hspf-2.1.2 → hspf-2.1.3}/tests/test_uci.py +0 -0
{hspf-2.1.2 → hspf-2.1.3}/.gitignore

@@ -1,3 +1,3 @@
 *.pyc
-*.whl
-*.gz
+*.whl
+*.gz
{hspf-2.1.2 → hspf-2.1.3}/src/hspf/hbn.py

@@ -6,7 +6,7 @@ nutrients relevant for our current calibration methods. (See calibration_helpers
 
 @author: mfratki
 """
-from
+from hspf import helpers
 import pandas as pd
 import math
 from struct import unpack
@@ -14,6 +14,7 @@ from numpy import fromfile
 from pandas import DataFrame
 from datetime import datetime, timedelta #, timezone
 from collections import defaultdict
+from collections.abc import MutableMapping
 #from pathlib import Path
 
 
@@ -189,11 +190,30 @@ class hbnInterface:
     def _clear_cache(self):
         [hbn._clear_cache() for hbn in self.hbns]
 
+
+
     def get_time_series(self, t_opn, t_cons, t_code, opnid, activity = None):
-
+        df = pd.concat([hbn._get_time_series(t_opn, t_cons, t_code, opnid, activity) for hbn in self.hbns],axis = 1)
+        if df.empty:
+            raise ValueError(f"No data found for {t_opn} {t_cons} {t_code} {opnid} {activity}")
+
+        if long_format:
+            df = df.reset_index().melt(id_vars = ['index'],var_name = 'OPNID',value_name = t_con)
+            df.rename(columns = {'index':'datetime'},inplace = True)
+            df['OPERATION'] = t_opn
+        return df
+
+    def get_multiple_timeseries(self,t_opn,t_code,t_con,opnids = None,activity = None,axis = 1,long_format = False):
+        df = pd.concat([hbn._get_multiple_timeseries(t_opn,t_code,t_con,opnids,activity) for hbn in self.hbns],axis = 1)
+        if df.empty:
+            raise ValueError(f"No data found for {t_opn} {t_con} {t_code} {opnids} {activity}")
 
-
-
+        if long_format:
+            df = df.reset_index().melt(id_vars = ['index'],var_name = 'OPNID',value_name = 'value')
+            df.rename(columns = {'index':'datetime'},inplace = True)
+            df['TIMESERIES'] = t_con
+            df['OPERATION'] = t_opn
+        return df
 
     def get_perlnd_constituent(self,constituent,perlnd_ids = None,time_step = 5):
         return get_simulated_perlnd_constituent(self,constituent,time_step)
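The `long_format` branch above reshapes the concatenated wide frame (one column per operation ID) into a tidy long frame via `melt`. A minimal sketch of that reshape with made-up data (note that the `get_time_series` wrapper references `long_format` and `t_con` without declaring them in its signature, so in 2.1.3 only `get_multiple_timeseries` actually exposes this path):

import pandas as pd

# Stand-in for concatenated HBN output: one column per OPNID (values are fake).
idx = pd.date_range('1996-01-01', periods=3, freq='D')
wide = pd.DataFrame({101: [1.0, 2.0, 3.0], 102: [4.0, 5.0, 6.0]}, index=idx)

# The same reset_index/melt/rename sequence the new methods apply:
long = wide.reset_index().melt(id_vars=['index'], var_name='OPNID', value_name='value')
long.rename(columns={'index': 'datetime'}, inplace=True)
long['OPERATION'] = 'RCHRES'
# Result: one row per (datetime, OPNID) with columns datetime, OPNID, value, OPERATION.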
@@ -217,13 +237,33 @@ hbnInterface:
         # for dic in dics:
         #     for key, vals in dic.items():
         #         [dd[key].append(val) for val in vals]
-        dd = defaultdict(set)
+        # dd = defaultdict(set)
         dics = [hbn.output_names() for hbn in self.hbns]
+        return merge_dicts(dics)
+        # for dic in dics:
+        #     for operation, vals in dic.items():
+        #         for activity,v in vals.items():
+        #             [dd[operation][activity].add(t) for t in v]
+        # return dd
+
+    def _timeseries(self):
+        mapn = self._mapn()
+        timeseries = []
+        for key, vals in mapn.items():
+            _key = list(key)
+            for val in vals:
+                timeseries.append(_key + [val])
+        return timeseries
+
+
+    def _mapn(self):
+        dd = defaultdict(set)
+        dics = [hbn.mapn for hbn in self.hbns]
         for dic in dics:
             for key, vals in dic.items():
                 [dd[key].add(val) for val in vals]
-        return dd
-
+        return dd
+
     def get_perlnd_data(self,constituent,t_code = 'yearly'):
         t_cons = helpers.get_tcons(constituent,'PERLND')
 
@@ -236,14 +276,13 @@ hbnInterface:
         return df
 
 
-    def
+    def get_rchres_output(self,constituent,units = 'mg/l',t_code = 5):
         '''
         Convience function for accessing the hbn time series associated with our current
         calibration method. Assumes you are summing across all dataframes.
         '''
-
-        df =
-        df.columns = reach_ids
+        t_cons = helpers.get_tcons(constituent,'RCHRES',units)
+        df = sum([self.get_multiple_timeseries('RCHRES',t_code,t_con) for t_con in t_cons])
         df.attrs['unit'] = units
         df.attrs['constituent'] = constituent
         return df
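`get_rchres_output` totals the per-constituent frames with Python's built-in `sum`, which adds DataFrames element-wise after aligning on index and columns. A small illustration with two hypothetical constituent frames:

import pandas as pd

no3 = pd.DataFrame({101: [1.0, 2.0], 102: [3.0, 4.0]})   # hypothetical NO3 loads
nh4 = pd.DataFrame({101: [0.5, 0.5], 102: [1.0, 1.0]})   # hypothetical NH4 loads

total = sum([no3, nh4])   # starts from 0, then adds each frame in turn
print(total)
#      101  102
# 0    1.5  4.0
# 1    2.5  5.0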
@@ -399,19 +438,27 @@ class hbnClass:
     def infer_opnids(self,t_opn, t_cons,activity):
         result = [k[-2] for k,v in self.mapn.items() if (t_cons in v) & (k[0] == t_opn) & (k[-1] == activity)]
         if len(result) == 0:
-
+            result = [-1]
+            # return print('No Constituent-OPNID relationship found')
         return result
 
 
     def infer_activity(self,t_opn, t_cons):
         result = [k[-1] for k,v in self.mapn.items() if (t_cons in v) & (k[0] == t_opn)]
         if len(result) == 0:
-
-
-
-
+            result = ''
+        else:# return print('No Constituent-Activity relationship found')
+            assert(len(set(result)) == 1)
+            result = result[0]
+        return result
 
     def get_time_series(self, t_opn, t_cons, t_code, opnid, activity = None):
+        df = self._get_time_series(t_opn, t_cons, t_code, opnid, activity)
+        if df.empty:
+            raise ValueError(f"No data found for {t_opn} {t_cons} {t_code} {opnid} {activity}")
+        return df
+
+    def _get_time_series(self, t_opn, t_cons, t_code, opnid, activity = None):
         """
         get a single time series based on:
         1. t_opn: RCHRES, IMPLND, PERLND
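The `infer_*` helpers scan `self.mapn`, whose keys end in `(…, opnid, activity)` and whose values list the constituents in that output block, as the `k[0]`/`k[-2]`/`k[-1]` indexing above implies. A sketch with fabricated entries (the exact key arity is an assumption):

# Fabricated mapn entries; real keys may carry more fields between the
# operation name and the trailing (opnid, activity) pair.
mapn = {
    ('RCHRES', 1, 'HYDR'): ['RO', 'VOL'],
    ('RCHRES', 2, 'HYDR'): ['RO', 'VOL'],
}

activity = [k[-1] for k, v in mapn.items() if ('RO' in v) & (k[0] == 'RCHRES')]
opnids = [k[-2] for k, v in mapn.items() if ('RO' in v) & (k[0] == 'RCHRES') & (k[-1] == 'HYDR')]
# activity -> ['HYDR', 'HYDR']  (infer_activity asserts uniqueness, returns 'HYDR')
# opnids   -> [1, 2]
# With no match, 2.1.3 now falls back to '' / [-1] instead of returning None.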
@@ -420,13 +467,15 @@ class hbnClass:
         4. t_activity: HYDR, IQUAL, etc
         5. time_unit: yearly, monthly, full (default is 'full' simulation duration)
         """
+
+
         if isinstance(t_code,str):
             t_code = self.tcodes[t_code]
 
         if activity is None:
             activity = self.infer_activity(t_opn,t_cons)
-
-
+
+
         summaryindx = f'{t_opn}_{activity}_{opnid:03d}_{t_code}'
         if summaryindx in self.summaryindx:
             df = self.data_frames[summaryindx][t_cons].copy()
@@ -438,25 +487,31 @@ class hbnClass:
             #df.index = df.index.shift(-1,TCODES2FREQ[t_code])
             df = df[df.index >= '1996-01-01']
         else:
-            df =
+            df = pd.DataFrame()
 
         return df
+
     def get_multiple_timeseries(self,t_opn,t_code,t_con,opnids = None,activity = None):
+        df = self._get_multiple_timeseries(t_opn,t_code,t_con,opnids,activity)
+        if df.empty:
+            raise ValueError(f"No data found for {t_opn} {t_con} {t_code} {opnids} {activity}")
+        return df
+
+    def _get_multiple_timeseries(self,t_opn,t_code,t_con,opnids = None,activity = None):
         # a single constituent but multiple opnids
+
+
         if isinstance(t_code,str):
             t_code = self.tcodes[t_code]
 
         if activity is None:
-            activity = self.infer_activity(t_opn,t_con)
-
-            return None
-
+            activity = self.infer_activity(t_opn,t_con)
+
         if opnids is None:
             opnids = self.infer_opnids(t_opn,t_con,activity)
-
-            return None
+
 
-        df =
+        df = pd.DataFrame()
         frames = []
         mapd_list = list(self.mapd.keys())
         for opnid in opnids:
@@ -475,9 +530,76 @@ class hbnClass:
             dic[activity] = set([item for sublist in t_cons for item in sublist])
         return dic
 
+
+    def output_names(self):
+
+        activities = []
+        operations = []
+        for k, v in self.mapn.items():
+            operations.append(k[0])
+            activities.append(k[-1])
+
+        operations = set(operations)
+        activities = set(activities)
+        #activities = set([k[-1] for k,v in self.mapn.items()])
+
+        dic = {}
+        for operation in operations:
+            acitivities = set([k[-1] for k,v in self.mapn.items() if k[0] == operation])
+            dic[operation] = {}
+            for activity in acitivities:
+                t_cons = [v for k,v in self.mapn.items() if (k[0] == operation) & (k[-1] == activity)]
+                dic[operation][activity] = set([item for sublist in t_cons for item in sublist])
+        # for activity in activities:
+        #     t_cons = [v for k,v in self.mapn.items() if k[-1] == activity]
+        #     dic[activity] = set([item for sublist in t_cons for item in sublist])
+        return dic
+
+    def get_timeseries(self):
+        mapn = self.mapn
+        timeseries = []
+        for key, vals in mapn.items():
+            _key = list(key)
+            for val in vals:
+                timeseries.append(_key + [val])
+        return timeseries
+
     @staticmethod
     def get_perlands(summary_indxs):
         perlands = [int(summary_indx.split('_')[-2]) for summary_indx in summary_indxs]
         return perlands
 
-
+
+def merge_dicts(dicts):
+    """
+    Merge a list of dictionaries into a single dictionary, combining sets
+    at the leaf level and properly merging nested dictionaries.
+
+    Args:
+        dicts (list): A list of dictionaries to merge.
+
+    Returns:
+        dict: The merged dictionary.
+    """
+    def recursive_merge(d1, d2):
+        for key, value in d2.items():
+            if key in d1:
+                # If the value is a dictionary, recurse
+                if isinstance(d1[key], MutableMapping) and isinstance(value, MutableMapping):
+                    recursive_merge(d1[key], value)
+                # If the value is a set, merge the sets
+                elif isinstance(d1[key], set) and isinstance(value, set):
+                    d1[key].update(value)
+                else:
+                    raise ValueError(f"Incompatible types for key '{key}': {type(d1[key])} vs {type(value)}")
+            else:
+                # If the key does not exist in d1, copy it
+                d1[key] = value
+
+    # Start with an empty dictionary
+    merged_dict = {}
+
+    for d in dicts:
+        recursive_merge(merged_dict, d)
+
+    return merged_dict
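A quick check of the merge semantics, assuming `merge_dicts` from the hunk above is in scope; the operation/activity/constituent names here are just example values shaped like `output_names()` results:

d1 = {'PERLND': {'PWATER': {'PERO', 'AGWO'}}}
d2 = {'PERLND': {'PWATER': {'SURO'}, 'SNOW': {'PACKF'}},
      'RCHRES': {'HYDR': {'RO'}}}

merged = merge_dicts([d1, d2])
# Nested dicts merge key-by-key; leaf sets are unioned:
# {'PERLND': {'PWATER': {'PERO', 'AGWO', 'SURO'}, 'SNOW': {'PACKF'}},
#  'RCHRES': {'HYDR': {'RO'}}}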
{hspf-2.1.2 → hspf-2.1.3}/src/hspf/hspfModel.py

@@ -7,17 +7,19 @@ Created on Thu Oct 13 09:26:05 2022
 from pathlib import Path
 import os.path
 import subprocess
+import concurrent.futures
 
-from .uci import UCI
-from
-from .reports import Reports
-from .wdm import wdmInterface
-from
+from hspf.uci import UCI
+from hspf import hbn
+from hspf.reports import Reports
+from hspf.wdm import wdmInterface
+from hspf import wdmReader
 
 
 
 
 
+winHSPF = str(Path(__file__).resolve().parent) + '\\bin\\WinHSPFLt\\WinHspfLt.exe'
 
 
 # Only for accessing information regarding a specific uci_file
@@ -51,6 +53,15 @@ class hspfModel():
         # Compositions
         self.reports = Reports(self.uci,self.hbns,self.wdms)
 
+    def _reinitialize(self,uci_file:str,run_model:bool = False):
+        self.uci = UCI(uci_file)
+        self.validate_uci(run_model = run_model)
+        self.hbns = hbn.hbnInterface(self.hbn_paths)
+        try:
+            self.wdms = wdmInterface(self.wdm_paths)
+        except:
+            self.wdms = None
+        self.reports = Reports(self.uci,self.hbns,self.wdms)
 
     def validate_wdms(self):
         # Ensure wdm files exist and the folders for the other file types exist relative
@@ -92,15 +103,16 @@ class hspfModel():
         else:
             self.run_model()
 
-    def run_model(self,new_uci_file = None):
+    def run_model(self,new_uci_file = None,):
 
         if new_uci_file is None:
             new_uci_file = self.uci_file
 
         # new_uci_file = self.model_path.joinpath(uci_name)
         # self.uci.write(new_uci_file)
-
-        self.
+
+        subprocess.run([winHSPF,self.uci_file.as_posix()]) #, stdout=subprocess.PIPE, creationflags=0x08000000)
+        self._reinitialize(new_uci_file,run_model = False)
 
     def load_hbn(self,hbn_name):
         self.hbns[hbn_name] = hbn.hbnClass(self.uci_file.parent.joinpath(hbn_name).as_posix())
@@ -177,8 +189,35 @@ class hspfModel():
 
 
 
+def run_uci(uci_file:str, ):
+    """
+    convenience function to run a single model uci file.
+    """
+    print(f"Starting model: {uci_file}")
+    subprocess.run([winHSPF, uci_file])
+    print(f"Completed model: {uci_file}")
 
 
+def run_batch_files(file_list, max_concurrent=4):
+    """
+    Takes a list of .uci file paths and runs them N at a time.
+    """
+    # Create a pool of workers (threads)
+    with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrent) as executor:
+        # Submit all jobs to the pool
+        future_to_file = {
+            executor.submit(run_uci, uci_file): uci_file
+            for uci_file in file_list
+        }
+
+        # Monitor completion (optional, but good for error catching)
+        for future in concurrent.futures.as_completed(future_to_file):
+            uci_file = future_to_file[future]
+            try:
+                future.result()  # This will raise exceptions if run_uci failed
+            except Exception as exc:
+                print(f"File {uci_file} generated an exception: {exc}")
+
 
 # class runManager():
 #     def __init__()
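A hypothetical driver for the new batch runner; the paths are examples only, and this assumes `run_uci` and `run_batch_files` land at module level in `hspf.hspfModel` as the indentation above suggests:

from hspf.hspfModel import run_batch_files

uci_files = [
    'C:/models/Clearwater.uci',
    'C:/models/BigFork.uci',
]

# At most 4 models run at once. Threads are sufficient here because each
# run blocks in subprocess.run on WinHspfLt.exe rather than holding the GIL.
run_batch_files(uci_files, max_concurrent=4)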
{hspf-2.1.2 → hspf-2.1.3}/src/hspf/parser/graph.py

@@ -635,7 +635,8 @@ class reachNetwork():
         areas = areas.groupby(['source_type','source_type_id','source_name'])['area'].sum()[['PERLND','IMPLND']]
 
         if group:
-            areas =
+            areas = areas.groupby(['source_type','source_name']).sum()
+            #areas = pd.concat([areas[operation].groupby('source_name').sum() for operation in ['PERLND','IMPLND']])
             #areas = pd.concat([areas[operation].groupby(self.uci.opnid_dict[operation].loc[areas[operation].index,'LSID'].values).sum() for operation in ['PERLND','IMPLND']])
         return areas
 