hspf 2.0.0__tar.gz → 2.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. hspf-2.0.2/.gitignore +3 -0
  2. {hspf-2.0.0 → hspf-2.0.2}/PKG-INFO +1 -1
  3. {hspf-2.0.0 → hspf-2.0.2}/pyproject.toml +1 -1
  4. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/hbn.py +6 -5
  5. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/hspfModel.py +5 -5
  6. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/parser/graph.py +69 -8
  7. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/reports.py +1 -1
  8. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/uci.py +1 -1
  9. hspf-2.0.0/.gitignore +0 -1
  10. {hspf-2.0.0 → hspf-2.0.2}/.gitattributes +0 -0
  11. {hspf-2.0.0 → hspf-2.0.2}/MANIFEST.in +0 -0
  12. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/__init__.py +0 -0
  13. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/ParseTable.csv +0 -0
  14. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/IMPLND/IQUAL.txt +0 -0
  15. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/IMPLND/IWATER.txt +0 -0
  16. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/IMPLND/IWTGAS.txt +0 -0
  17. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/IMPLND/SOLIDS.txt +0 -0
  18. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/PERLND/MSTLAY.txt +0 -0
  19. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/PERLND/PQUAL.txt +0 -0
  20. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/PERLND/PSTEMP.txt +0 -0
  21. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/PERLND/PWATER.txt +0 -0
  22. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/PERLND/PWATGAS.txt +0 -0
  23. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/PERLND/SEDMNT.txt +0 -0
  24. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/PERLND/SNOW.txt +0 -0
  25. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/RCHRES/CONS.txt +0 -0
  26. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/RCHRES/GQUAL.txt +0 -0
  27. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/RCHRES/HTRCH.txt +0 -0
  28. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/RCHRES/HYDR.txt +0 -0
  29. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/RCHRES/NUTRX.txt +0 -0
  30. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/RCHRES/OXRX.txt +0 -0
  31. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/RCHRES/PLANK.txt +0 -0
  32. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/data/Timeseries Catalog/RCHRES/SEDTRN.txt +0 -0
  33. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/helpers.py +0 -0
  34. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/parser/__init__.py +0 -0
  35. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/parser/parsers.py +0 -0
  36. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/wdm.py +0 -0
  37. {hspf-2.0.0 → hspf-2.0.2}/src/hspf/wdmReader.py +0 -0
hspf-2.0.2/.gitignore ADDED
@@ -0,0 +1,3 @@
+ *.pyc
+ *.whl
+ *.gz
{hspf-2.0.0 → hspf-2.0.2}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hspf
- Version: 2.0.0
+ Version: 2.0.2
  Summary: Python package for downloading and running HSPF models
  Project-URL: Homepage, https://github.com/mfratkin1/pyHSPF
  Author-email: Mulu Fratkin <michael.fratkin@state.mn.us>
{hspf-2.0.0 → hspf-2.0.2}/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "hatchling.build"
  [project]
  name = "hspf"
  urls = { "Homepage" = "https://github.com/mfratkin1/pyHSPF" } # ? Add this!
- version = "2.0.0"
+ version = "2.0.2"
  dependencies = [
      "pandas",
      "requests",
{hspf-2.0.0 → hspf-2.0.2}/src/hspf/hbn.py
@@ -6,15 +6,14 @@ nutrients relevant for our current calibration methods. (See calibration_helpers

  @author: mfratki
  """
- from pyhspf import helpers
+ from . import helpers
  import pandas as pd
  import math
  from struct import unpack
  from numpy import fromfile
  from pandas import DataFrame
- from datetime import datetime, timedelta, timezone
+ from datetime import datetime, timedelta #, timezone
  from collections import defaultdict
- import numpy as np
  #from pathlib import Path


@@ -350,11 +349,13 @@ class hbnClass:
          rows = []
          times = []
          nvals = len(self.mapn[operation, id, activity]) # number constituent timeseries
-         utc_offset = timezone(timedelta(hours=-6)) #UTC is 6hours ahead of CST
+         #utc_offset = timezone(timedelta(hours=-6)) #UTC is 6hours ahead of CST
          for (index, reclen) in self.mapd[operation, id, activity, tcode]:
              yr, mo, dy, hr, mn = unpack('5I', self.data[index + 36: index + 56])
              hr = hr-1
-             dt = datetime(yr, mo, dy, 0, mn ,tzinfo=utc_offset) + timedelta(hours=hr)
+             #dt = datetime(yr, mo, dy, 0, mn ,tzinfo=utc_offset) + timedelta(hours=hr)
+             dt = datetime(yr, mo, dy, 0, mn ) + timedelta(hours=hr)
+
              times.append(dt)

              index += 56
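Note: with the timezone handling commented out, HBN record timestamps are now built as naive datetimes. A minimal sketch of the new decoding arithmetic, using made-up header values in place of the bytes unpacked from the binary file:

    from datetime import datetime, timedelta

    # Hypothetical values standing in for unpack('5I', ...) on an HBN record header.
    yr, mo, dy, hr, mn = 2021, 6, 30, 24, 0   # HSPF-style hours run 1-24

    hr = hr - 1                                             # shift to a 0-23 range
    dt = datetime(yr, mo, dy, 0, mn) + timedelta(hours=hr)  # naive: no tzinfo attached
    print(dt)  # 2021-06-30 23:00:00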
{hspf-2.0.0 → hspf-2.0.2}/src/hspf/hspfModel.py
@@ -8,11 +8,11 @@ from pathlib import Path
  import os.path
  import subprocess

- from pyhspf.uci import UCI
- from pyhspf import hbn
- from pyhspf.reports import Reports
- from pyhspf.wdm import wdmInterface
- from pyhspf import wdmReader
+ from .uci import UCI
+ from . import hbn
+ from .reports import Reports
+ from .wdm import wdmInterface
+ from . import wdmReader



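Note: the 2.0.2 modules now import their siblings relative to the hspf package instead of a separately named pyhspf distribution, so the code no longer depends on which top-level name the project is installed under. A hypothetical smoke test (not part of the package) that exercises the new import paths:

    # Assumes hspf 2.0.2 is installed; no pyhspf distribution is required.
    import importlib

    for name in ("hspf.uci", "hspf.hbn", "hspf.reports", "hspf.wdm", "hspf.wdmReader"):
        module = importlib.import_module(name)
        print(module.__name__, "imported OK")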
{hspf-2.0.0 → hspf-2.0.2}/src/hspf/parser/graph.py
@@ -332,6 +332,14 @@ CREATE TABLE GenInfo (

  #%% Methods using universal node id

+ # def bypass_node(G, node):
+ #     preds = list(G.predecessors(node))
+ #     succs = list(G.successors(node))
+ #     for u in preds:
+ #         for v in succs:
+ #             if
+ #                 G.add_edge(u, v)
+ #     G.remove_node(node)

  def _add_subgraph_labels(G,G_sub):
      G_sub.labels = {label:node for label, node in G.labels.items() if node in G_sub.nodes}
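Note: the commented-out bypass_node stub above sketches a node contraction: connect every predecessor of a node to every successor, then drop the node. One way that idea could be completed with networkx (an illustration only; the released code keeps the stub commented out and incomplete):

    import networkx as nx

    def bypass_node(G: nx.DiGraph, node) -> None:
        """Remove node while preserving the paths that ran through it."""
        preds = list(G.predecessors(node))
        succs = list(G.successors(node))
        for u in preds:
            for v in succs:
                if u != v:              # skip self-loops
                    G.add_edge(u, v)
        G.remove_node(node)

    # Tiny usage example on a hypothetical three-node chain.
    G = nx.DiGraph([(1, 2), (2, 3)])
    bypass_node(G, 2)
    print(list(G.edges()))  # [(1, 3)]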
@@ -519,7 +527,24 @@ def paths(G,reach_id,source_type = 'RCHRES'):
  def count_ancestors(G,node_type,ancestor_node_type):
      return {node['type_id']:len(ancestors(G,node['id'],ancestor_node_type)) for node in get_nodes(G,node_type)}

-
+ # def catchment_ids(G):
+ #     result = []
+ #     for node in get_node_ids(G,'RCHRES'):
+ #         upstream_nodes = G.predecessors(node)
+ #         if any([G.nodes[up]['type'] in ['PERLND','IMPLND'] for up in upstream_nodes]):
+ #             result.append(G.nodes[node]['type_id'])
+ #     return result
+
+ # Very expensive. Should probably standardize it so routing reaches have no implnds/perlnds
+ def catchment_ids(G):
+     result = []
+     for node in get_node_ids(G,'RCHRES'):
+         upstream_nodes = G.predecessors(node)
+         if any([G.nodes[up]['type'] in ['PERLND','IMPLND'] for up in upstream_nodes]):
+             cat = make_catchment(G,G.nodes[node]['type_id'])
+             if area(cat) > 0:
+                 result.append(G.nodes[node]['type_id'])
+     return result

  # Catchment constructor
  def make_catchment(G,reach_id):
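Note: the new catchment_ids keeps only RCHRES nodes that receive direct PERLND/IMPLND input (and, per the added check, whose catchment has non-zero area). The core filter can be reproduced on a bare networkx graph whose nodes carry the same type/type_id attributes the parser assigns; the toy graph below is an assumption for illustration:

    import networkx as nx

    # Toy schematic: two land segments drain to reach 10, which routes to reach 20.
    G = nx.DiGraph()
    G.add_node("P101", type="PERLND", type_id=101)
    G.add_node("I201", type="IMPLND", type_id=201)
    G.add_node("R10", type="RCHRES", type_id=10)
    G.add_node("R20", type="RCHRES", type_id=20)
    G.add_edges_from([("P101", "R10"), ("I201", "R10"), ("R10", "R20")])

    catchments = [
        G.nodes[n]["type_id"]
        for n in G.nodes
        if G.nodes[n]["type"] == "RCHRES"
        and any(G.nodes[p]["type"] in ("PERLND", "IMPLND") for p in G.predecessors(n))
    ]
    print(catchments)  # [10] -- reach 20 is a pure routing reach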
@@ -550,8 +575,28 @@ def make_watershed(G,reach_ids):
      return watershed


- def catcments(G):
-     return None
+ # def catcments(G):
+ #     cats = [Catchment(graph.make_catchment(G,reach_id) for reach_id in graph.get_node_type_ids(G,'RCHRES'))]
+
+ #     return
+
+ #     for u, v, edge_data in graph.make_catchment(G,reach_id).edges(data=True):
+ #         source_node_attributes = G.nodes[u]
+ #         # Add or update edge attributes with source node attributes
+ #         edge_data["source_type"] = source_node_attributes.get("type")
+ #         edge_data["source_name"] = source_node_attributes.get("name")
+ #         edge_data["source_type_id"] = source_node_attributes.get("type_id")
+ #         cats.append(edge_data)
+
+ #     return pd.DataFrame(cats)
+
+
+ #     for node in G.nodes:
+ #         upstream_nodes = G.predecessors(node)
+ #         if any(G.nodes[up]['type'] in ['PELND','IMPLND'] for up in upstream_nodes):
+ #             result.append(node)
+
+ #     return None
  # Catchment selectors

  '''
@@ -637,9 +682,12 @@ class Catchment():
  #%% Legacy Methods for Backwards compatability
  class reachNetwork():
      def __init__(self,uci,reach_id = None):
+         self.uci = uci
          self.G = create_graph(uci)
+         self.catchment_ids = catchment_ids(self.G)
+         self.routing_reaches = self._routing_reaches()
+         self.lakes = self._lakes()
          self.schematic = uci.table('SCHEMATIC').astype({'TVOLNO': int, "SVOLNO": int, 'AFACTR':float})
-         self.uci = uci

      def get_node_type_ids(self,node_type):
          return get_node_type_ids(self.G, node_type)
@@ -709,15 +757,18 @@ class reachNetwork():

      def subwatersheds(self,reach_ids = None):
          df = subwatersheds(self.uci)
-         if reach_ids is not None:
-             df = df.loc[df.index.intersection(reach_ids)]
-         return df
+         if reach_ids is None:
+             reach_ids = get_node_type_ids(self.G,'RCHRES')
+         return df.loc[df.index.intersection(reach_ids)]

      def subwatershed(self,reach_id):
          return subwatershed(self.uci,reach_id) #.loc[reach_id]

      def subwatershed_area(self,reach_id):
-         return self.drainage(reach_id).query("source_type in ['PERLND','IMPLND']")['area'].sum()
+         area = self.drainage(reach_id).query("source_type in ['PERLND','IMPLND']")['area'].sum()
+         # if (reach_id in self.lakes()) & (f'FTABLE{reach_id}' in self.uci.table_names('FTABLES')):
+         #     area = area + self.lake_area(reach_id)
+         return area

      def reach_contributions(self,operation,opnids):
          return reach_contributions(self.uci,operation,opnids)
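Note: subwatersheds now defaults reach_ids to every RCHRES node and always filters through Index.intersection, so requesting reaches that are missing from the table yields an empty selection instead of a KeyError. The underlying pandas behavior in isolation (frame and ids are invented):

    import pandas as pd

    df = pd.DataFrame({"area": [12.5, 3.1]}, index=[10, 20])  # hypothetical subwatershed table
    reach_ids = [10, 99]                                       # 99 is not in the table
    print(df.loc[df.index.intersection(reach_ids)])            # only reach 10 comes back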
@@ -736,6 +787,15 @@ class reachNetwork():

      def outlets(self):
          return [self.G.nodes[node]['type_id'] for node, out_degree in self.G.out_degree() if (out_degree == 0) & (self.G.nodes[node]['type'] == 'RCHRES')]
+     def _lakes(self):
+         return list(self.uci.table('RCHRES','GEN-INFO').query('LKFG == 1',engine = 'python').index.astype(int))
+
+     def lake_area(self,reach_id):
+         return self.uci.table('FTABLES',f'FTABLE{reach_id}')['Area'].max()
+
+     def _routing_reaches(self):
+         return [reach_id for reach_id in self.get_node_type_ids('RCHRES') if reach_id not in self.catchment_ids]
+

      def paths(self,reach_id):
          return paths(self.G,reach_id)
@@ -799,6 +859,7 @@ def subwatersheds(uci):
      schematic = schematic[schematic['TVOL'] == 'RCHRES'][['SVOLNO','TVOLNO','AFACTR','MLNO']].astype({'SVOLNO':int,'TVOLNO':int,'AFACTR':float,'MLNO':int})
      schematic.reset_index(inplace=True,drop=False)
      schematic.set_index('TVOLNO',inplace=True)
+     schematic = schematic.loc[catchment_ids(uci.network.G)]

      dfs = []
      for operation in ['PERLND','IMPLND']:
{hspf-2.0.0 → hspf-2.0.2}/src/hspf/reports.py
@@ -6,7 +6,7 @@ Created on Mon Apr 11 08:26:04 2022
  """
  import numpy as np
  import pandas as pd
- from pyhspf import helpers
+ from . import helpers
  from pathlib import Path

  #timeseries_catalog = pd.read_csv(Path(__file__).parent/'TIMESERIES_CATALOG.csv')
{hspf-2.0.0 → hspf-2.0.2}/src/hspf/uci.py
@@ -78,7 +78,7 @@ class UCI():

      def table(self,block,table_name = 'na',table_id = 0,drop_comments = True):
          # Dynamic parsing of tables when called by user
-         assert block in ['FILES','PERLND','IMPLND','RCHRES','SCHEMATIC','OPN SEQUENCE','MASS-LINK','EXT SOURCES','NETWORK','GENER','MONTH-DATA','EXT TARGETS','COPY']
+         assert block in ['FILES','PERLND','IMPLND','RCHRES','SCHEMATIC','OPN SEQUENCE','MASS-LINK','EXT SOURCES','NETWORK','GENER','MONTH-DATA','EXT TARGETS','COPY','FTABLES']

          table = self.uci[(block,table_name,table_id)] #[block][table_name][table_id]
          #TODO move the format_opnids into the Table class?
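Note: allowing 'FTABLES' in the assert is what the new lake_area helper relies on, since it asks the UCI accessor for an individual FTABLE and takes the maximum of its Area column. A rough, self-contained stand-in for that lookup (the frame is invented, and treating the largest tabulated area as the lake surface area is the helper's assumption, not a statement about HSPF in general):

    import pandas as pd

    # Hypothetical stand-in for uci.table('FTABLES', 'FTABLE10'):
    # each row gives the surface area at a tabulated depth.
    ftable_10 = pd.DataFrame({"Depth": [0.0, 1.0, 2.0], "Area": [0.0, 45.2, 61.7]})
    print(ftable_10["Area"].max())  # 61.7, as lake_area would return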
hspf-2.0.0/.gitignore DELETED
@@ -1 +0,0 @@
- *.pyc