hspf 2.1.0__py3-none-any.whl → 2.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hspf/Masslink_Timeseries.csv +240 -0
- hspf/bin/WinHSPFLt/ATCoRend.dbf +0 -0
- hspf/bin/WinHSPFLt/ATCoUnits.mdb +0 -0
- hspf/bin/WinHSPFLt/ERROR.FIL +52657 -0
- hspf/bin/WinHSPFLt/LF90.EER +0 -0
- hspf/bin/WinHSPFLt/LF90WIOD.DLL +0 -0
- hspf/bin/WinHSPFLt/MapWinUtility.dll +0 -0
- hspf/bin/WinHSPFLt/StatusMonitor.exe +0 -0
- hspf/bin/WinHSPFLt/hass_ent.dll +0 -0
- hspf/bin/WinHSPFLt/hspfmsg.wdm +0 -0
- hspf/bin/WinHSPFLt/hspfmsg.wdu +1 -0
- hspf/build_warehouse.py +625 -0
- hspf/data/HSPFParameterRanges.csv +492 -0
- hspf/data/LandUseNames_Mappings.csv +3330 -0
- hspf/hbn.py +28 -8
- hspf/hbn2.py +316 -0
- hspf/hbn_cy.c +14450 -0
- hspf/hbn_cy.html +1540 -0
- hspf/hbn_cy.pyx +107 -0
- hspf/helpers.py +11 -9
- hspf/parser/graph.py +17 -2
- hspf/reports.py +268 -459
- hspf/validations.py +211 -0
- hspf/warehouse.py +282 -0
- {hspf-2.1.0.dist-info → hspf-2.1.2.dist-info}/METADATA +1 -1
- {hspf-2.1.0.dist-info → hspf-2.1.2.dist-info}/RECORD +27 -7
- {hspf-2.1.0.dist-info → hspf-2.1.2.dist-info}/WHEEL +0 -0
hspf/hbn_cy.pyx
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
# cython: language_level=3, boundscheck=False, wraparound=False, cdivision=True
|
|
2
|
+
# hbn_cy.pyx - Cython helpers for reading HBN binary files
|
|
3
|
+
from cpython.bytes cimport PyBytes_AsStringAndSize
|
|
4
|
+
cimport cython
|
|
5
|
+
import numpy as np
|
|
6
|
+
cimport numpy as cnp
|
|
7
|
+
from datetime import datetime, timedelta
|
|
8
|
+
|
|
9
|
+
# NOTE: `@cython.inline` is not a Cython compiler directive and is not the
# supported way to request inlining of a cdef function in .pyx syntax; the
# documented idiom is the `cdef inline` declaration used below.
cdef inline unsigned int _read_uint32_le(const unsigned char* buf, Py_ssize_t offset) nogil:
    """Read a little-endian unsigned 32-bit integer from buf at offset.

    Assumes the caller has already verified that offset+3 is within the
    buffer bounds (no bounds checking is done here).
    """
    return buf[offset] | (buf[offset+1] << 8) | (buf[offset+2] << 16) | (buf[offset+3] << 24)
|
|
13
|
+
|
|
14
|
+
@cython.boundscheck(False)
@cython.wraparound(False)
def map_hbn_file(str file_path):
    """
    Parses an HBN file from a file path to produce mapn and mapd dictionaries.
    Returns (mapn, mapd, data_bytes).

    mapn maps (operation, id, activity) -> list of constituent names parsed
    from "data names" records (rectype 0).
    mapd maps (operation, id, activity, tcode) -> list of (byte_offset,
    record_length) tuples locating the matching data records (rectype 1)
    inside data_bytes.
    data_bytes is the raw file content, returned so callers can slice the
    located records without re-reading the file.

    Raises ValueError if the file is empty or does not start with the
    0xFD magic byte.
    """
    cdef:
        bytes data_bytes
        const unsigned char* cbuf                     # borrowed C view of data_bytes (no copy)
        Py_ssize_t buf_len, index = 1, i, slen, ln    # index starts just past the 0xFD magic byte
        unsigned int rectype, tcode, idval, reclen
        unsigned char rc1, rc2, rc3, rc               # bit-packed record-length bytes
        dict mapn = {}
        dict mapd = {}

    with open(file_path, 'rb') as f:
        data_bytes = f.read()
    if not data_bytes:
        raise ValueError(f"File is empty: {file_path}")

    # Borrow a C pointer into the immutable bytes object.
    PyBytes_AsStringAndSize(data_bytes, <char **>&cbuf, &buf_len)
    if cbuf[0] != 0xFD:
        raise ValueError("BAD HBN FILE - must start with magic number 0xFD")

    while index < buf_len:
        if index + 28 > buf_len: break    # not enough bytes left for a record header
        rc1 = cbuf[index]; rc2 = cbuf[index+1]; rc3 = cbuf[index+2]; rc = cbuf[index+3]
        rectype = _read_uint32_le(cbuf, index + 4)
        idval = _read_uint32_le(cbuf, index + 16)
        # Record length is bit-packed across rc..rc1: the low 2 bits of rc1
        # are discarded (>> 2) and the other bytes are weighted by 64, 16384
        # and 4194304 (shifts of 6, 14 and 22 bits); the 24 header bytes are
        # then excluded.  NOTE(review): constants taken as-is from the HBN
        # format — verify against the HSPF binary output file spec.
        reclen = (<unsigned int>(rc) * 4194304) + (<unsigned int>(rc3) * 16384) + (<unsigned int>(rc2) * 64) + (<unsigned int>(rc1) >> 2) - 24

        # 8-byte fixed-width ASCII fields for the operation (e.g. RCHRES)
        # and activity names; undecodable bytes are dropped.
        operation = data_bytes[index+8:index+16].decode('ascii', 'ignore').strip()
        activity = data_bytes[index+20:index+28].decode('ascii', 'ignore').strip()

        if rectype == 1: # data record
            if index + 36 > buf_len: break
            tcode = _read_uint32_le(cbuf, index + 32)    # presumably the time-step code — confirm against spec
            key = (operation, idval, activity, int(tcode))
            if key not in mapd: mapd[key] = []
            mapd[key].append((index, reclen))
        elif rectype == 0: # data names record
            key = (operation, idval, activity)
            if key not in mapn: mapn[key] = []
            i = index + 28
            slen = 0
            # Names are stored back-to-back as length-prefixed ASCII strings:
            # a 4-byte little-endian length followed by that many characters.
            while slen < reclen:
                if i + slen + 4 > buf_len: break
                ln = _read_uint32_le(cbuf, i + slen)
                if i + slen + 4 + ln > buf_len: break
                name = data_bytes[i + slen + 4 : i + slen + 4 + ln].decode('ascii', 'ignore').strip().replace('-', '')
                mapn[key].append(name)
                slen += 4 + ln

        # Advance to the next record.  NOTE(review): the +29/+30 split for
        # short vs. long records mirrors HBN record-padding behavior —
        # confirm against the format specification.
        if reclen < 36: index += reclen + 29
        else: index += reclen + 30

    return mapn, mapd, data_bytes
|
|
72
|
+
|
|
73
|
+
@cython.boundscheck(False)
@cython.wraparound(False)
def read_data_entries(bytes data_bytes, list entries, int nvals):
    """
    Reads data entries from the file's bytes. Returns (times, rows_array).

    Parameters
    ----------
    data_bytes : bytes
        Raw contents of the HBN file (as returned by map_hbn_file).
    entries : list
        List of (byte_offset, record_length) tuples; only the offset is used.
    nvals : int
        Number of float32 values stored per record.

    Returns
    -------
    times : list of datetime
        One timestamp per entry; bad/truncated records fall back to
        datetime(1900, 1, 1).
    rows2d : ndarray of shape (len(entries), nvals), dtype float32
        The values of each record; truncated records are zero-filled.
    """
    cdef:
        const unsigned char* cbuf
        Py_ssize_t buf_len, num_entries = len(entries), k, idx
        unsigned int yr, mo, dy, hr, mn
        cnp.ndarray[cnp.float32_t, ndim=2] rows2d = np.empty((num_entries, nvals), dtype=np.float32)
        list times = [None] * num_entries

    # Borrow a C pointer into the bytes object (no copy).
    PyBytes_AsStringAndSize(data_bytes, <char **>&cbuf, &buf_len)

    for k in range(num_entries):
        idx = entries[k][0]  # Get just the index from the (index, reclen) tuple

        # Boundary check: a truncated record would read past the buffer.
        # Bug fix: previously a bare `continue` left times[k] as None and
        # rows2d[k] as uninitialized np.empty() memory; fill deterministic
        # fallbacks instead so downstream consumers never see garbage.
        if idx + 56 + (nvals * 4) > buf_len:
            times[k] = datetime(1900, 1, 1)
            rows2d[k] = 0.0
            continue

        yr = _read_uint32_le(cbuf, idx + 36)
        mo = _read_uint32_le(cbuf, idx + 40)
        dy = _read_uint32_le(cbuf, idx + 44)
        hr = _read_uint32_le(cbuf, idx + 48)
        mn = _read_uint32_le(cbuf, idx + 52)

        try:
            # hr - 1: presumably HBN stores hours 1-24 while datetime wants
            # 0-23 — TODO confirm against the HSPF output spec.
            times[k] = datetime(int(yr), int(mo), int(dy), int(hr) - 1, int(mn))
        except ValueError:
            times[k] = datetime(1900, 1, 1)  # Fallback for bad date data

        # Values start 56 bytes into the record, stored as little-endian float32.
        rows2d[k] = np.frombuffer(data_bytes, dtype=np.float32, count=nvals, offset=idx + 56)

    return times, rows2d
|
hspf/helpers.py
CHANGED
|
@@ -48,26 +48,28 @@ def get_tcons(nutrient_name,operation,units = 'mg/l'):
|
|
|
48
48
|
'N' :['NO3OUTTOT','NO2OUTTOT'], # N
|
|
49
49
|
'OP' :['PO4OUTDIS'], # Ortho
|
|
50
50
|
'TP' :['PTOTOUT'],
|
|
51
|
-
'BOD' :['BODOUTTOT']},
|
|
51
|
+
'BOD' :['BODOUTTOT'],},
|
|
52
52
|
'cfs': {'Q': ['ROVOL']},
|
|
53
|
-
'acrft' : {'Q': ['ROVOL']}
|
|
53
|
+
'acrft' : {'Q': ['ROVOL']},
|
|
54
|
+
'degf' : {'WT': ['TW']}}
|
|
54
55
|
|
|
55
56
|
t_cons = MAP[units]
|
|
56
|
-
|
|
57
|
+
elif operation == 'PERLND':
|
|
57
58
|
t_cons = {'TSS' :['SOSED'],
|
|
58
59
|
'TKN' :['POQUALNH3+NH4'],
|
|
59
60
|
'N' :['POQUALNO3'],
|
|
60
61
|
'OP' :['POQUALORTHO P'],
|
|
61
62
|
'BOD' :['POQUALBOD'],
|
|
62
63
|
'Q' : ['PERO']} # BOD is the difference of ptot and ortho
|
|
63
|
-
|
|
64
|
+
elif operation == 'IMPLND':
|
|
64
65
|
t_cons = {'TSS' :['SLDS'],
|
|
65
|
-
'TKN' :['
|
|
66
|
-
'N' :['
|
|
67
|
-
'OP' :['
|
|
68
|
-
'BOD' :['
|
|
66
|
+
'TKN' :['SOQUALNH3+NH4'],
|
|
67
|
+
'N' :['SOQUALNO3'],
|
|
68
|
+
'OP' :['SOQUALORTHO P'],
|
|
69
|
+
'BOD' :['SOQUALBOD'],
|
|
69
70
|
'Q' : ['SURO']} # BOD is the difference of ptot and ortho
|
|
70
|
-
|
|
71
|
+
else:
|
|
72
|
+
raise ValueError(f'Operation {operation} not recognized for nutrient time constituent lookup.')
|
|
71
73
|
return t_cons[nutrient_name]
|
|
72
74
|
|
|
73
75
|
|
hspf/parser/graph.py
CHANGED
|
@@ -263,6 +263,9 @@ def get_implnd_node(G,implnd_id):
|
|
|
263
263
|
def get_node_type_ids(G,node_type = 'RCHRES'):
    """Return the 'type_id' attribute of every node in G whose 'type'
    attribute equals node_type (default 'RCHRES')."""
    ids = []
    for _node, attrs in G.nodes(data = True):
        if attrs['type'] == node_type:
            ids.append(attrs['type_id'])
    return ids
|
|
265
265
|
|
|
266
|
+
def get_node_type_id(G,node_id):
    """Return the 'type_id' attribute of the single node node_id in G."""
    attrs = G.nodes[node_id]
    return attrs['type_id']
|
|
268
|
+
|
|
266
269
|
def get_reaches(G):
    """Convenience wrapper: the type_ids of all RCHRES (reach) nodes in G."""
    reach_type = 'RCHRES'
    return get_node_type_ids(G, node_type = reach_type)
|
|
268
271
|
|
|
@@ -372,7 +375,8 @@ def make_watershed(G,reach_ids,upstream_reach_ids = None):
|
|
|
372
375
|
|
|
373
376
|
|
|
374
377
|
return G.subgraph(nodes).copy()
|
|
375
|
-
|
|
378
|
+
|
|
379
|
+
|
|
376
380
|
|
|
377
381
|
# node_ids = set([get_node_id(G,'RCHRES',reach_id) for reach_id in reach_ids if reach_id > 0])
|
|
378
382
|
# nodes_to_exclude = set([get_node_id(G,'RCHRES',abs(reach_id)) for reach_id in reach_ids if reach_id < 0])
|
|
@@ -520,10 +524,20 @@ class reachNetwork():
|
|
|
520
524
|
self.routing_reaches = self._routing_reaches()
|
|
521
525
|
self.lakes = self._lakes()
|
|
522
526
|
self.schematic = uci.table('SCHEMATIC').astype({'TVOLNO': int, "SVOLNO": int, 'AFACTR':float})
|
|
523
|
-
|
|
527
|
+
#self.subwatersheds = self._subwatersheds(self.uci)
|
|
528
|
+
|
|
524
529
|
def get_node_type_ids(self,node_type):
|
|
525
530
|
return get_node_type_ids(self.G, node_type)
|
|
526
531
|
|
|
532
|
+
def watershed_outlets(self):
|
|
533
|
+
reach_ids = []
|
|
534
|
+
for reach_id in self.get_node_type_ids('RCHRES'):
|
|
535
|
+
upstream = self.upstream(reach_id)
|
|
536
|
+
reach_ids.append([reach_id])
|
|
537
|
+
if len(upstream) > 1:
|
|
538
|
+
reach_ids.append(upstream)
|
|
539
|
+
return reach_ids
|
|
540
|
+
|
|
527
541
|
def _upstream(self,reach_id,node_type = 'RCHRES'):
|
|
528
542
|
'''
|
|
529
543
|
Returns list of model reaches upstream of inclusive of reach_id
|
|
@@ -690,6 +704,7 @@ def subwatersheds(uci):
|
|
|
690
704
|
|
|
691
705
|
df = pd.concat(dfs).reset_index()
|
|
692
706
|
df = df.set_index('TVOLNO')
|
|
707
|
+
|
|
693
708
|
return df
|
|
694
709
|
|
|
695
710
|
def subwatershed(uci,reach_id):
|