wolfhece 2.0.44__py3-none-any.whl → 2.0.46__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wolfhece/GraphProfile.py +26 -13
- wolfhece/Model1D.py +1562 -319
- wolfhece/PyCrosssections.py +9 -8
- wolfhece/PyDraw.py +17 -10
- wolfhece/PyVertexvectors.py +39 -27
- wolfhece/apps/version.py +1 -1
- wolfhece/drawing_obj.py +10 -0
- wolfhece/hydrology/Catchment.py +329 -5
- wolfhece/hydrology/Comparison.py +34 -25
- wolfhece/hydrology/Optimisation.py +1049 -117
- wolfhece/hydrology/RetentionBasin.py +200 -45
- wolfhece/hydrology/SubBasin.py +614 -31
- wolfhece/hydrology/constant.py +2 -2
- wolfhece/hydrology/cst_exchanges.py +35 -0
- wolfhece/hydrology/plot_hydrology.py +17 -19
- wolfhece/hydrology/read.py +63 -4
- wolfhece/libs/WolfDll.dll +0 -0
- wolfhece/libs/WolfDll_debug.dll +0 -0
- wolfhece/pyGui1D.py +114 -42
- wolfhece/scenario/check_scenario.py +1 -1
- wolfhece/scenario/config_manager.py +46 -1
- {wolfhece-2.0.44.dist-info → wolfhece-2.0.46.dist-info}/METADATA +1 -1
- {wolfhece-2.0.44.dist-info → wolfhece-2.0.46.dist-info}/RECORD +26 -26
- {wolfhece-2.0.44.dist-info → wolfhece-2.0.46.dist-info}/WHEEL +0 -0
- {wolfhece-2.0.44.dist-info → wolfhece-2.0.46.dist-info}/entry_points.txt +0 -0
- {wolfhece-2.0.44.dist-info → wolfhece-2.0.46.dist-info}/top_level.txt +0 -0
wolfhece/hydrology/SubBasin.py
CHANGED
@@ -16,6 +16,7 @@ import datetime # module which contains objects treating
 from matplotlib.font_manager import FontProperties
 from dbfread import DBF
 import ctypes as ct
+import pandas as pd
 
 from ..PyTranslate import _
 from . import plot_hydrology as ph
@@ -116,8 +117,9 @@ class SubBasin:
     _version:float  # version of the wolfHydro python code. Useful for identifying the file versions to read and how to interpret them
 
 
-    def __init__(self, _dateBegin, _dateEnd, _deltaT, _model,_workingDir
-
+    def __init__(self, _dateBegin:datetime.datetime=None, _dateEnd:datetime.datetime=None, _deltaT:int=0, _model=cst.measures,_workingDir:str="",
+                 _hyeto:dict={}, _x:float=0.0, _y:float=0.0, surfaceDrained:float=0.0,
+                 _iD_interiorPoint:int=1,_idSorted:int=1, name:str=None, readHydro=True, _tz:int=0, version:str=cst.VERSION_WOLFHYDRO):
         if(name is None):
             self.name = 'ss '+ str(_iD_interiorPoint)
         else:
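Every constructor argument now has a default, so a SubBasin can be created empty and populated afterwards. A minimal usage sketch, assuming package-level import paths and illustrative values (none of this comes from the diff itself):

import datetime
from wolfhece.hydrology.SubBasin import SubBasin
from wolfhece.hydrology import constant as cst   # assumed import path for the cst constants used above

# Hypothetical example: every argument is an optional keyword in 2.0.46.
sb = SubBasin(name="Ourthe_01",
              _dateBegin=datetime.datetime(2021, 7, 1, tzinfo=datetime.timezone.utc),
              _dateEnd=datetime.datetime(2021, 7, 31, tzinfo=datetime.timezone.utc),
              _deltaT=3600,                # time step [s]
              _model=cst.measures,         # also the default model
              surfaceDrained=125.0,        # [km^2], stored on surfaceDrained and surfaceDrainedHydro
              readHydro=False)             # presumably skips reading hydrograph files at construction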
@@ -189,8 +191,8 @@ class SubBasin:
         self.landuseHydroDict = {}  # Dictionnary with all landuses of the hydro subbasin
 
         # Further information
-        self.surfaceDrained =
-        self.surfaceDrainedHydro =
+        self.surfaceDrained = surfaceDrained        # [km^2]
+        self.surfaceDrainedHydro = surfaceDrained   # [km^2]
         self.timeDelay = 0.0  # [s]
 
         self.peakVal = 0.0  # [m³/s] peak value for total outFlow
@@ -279,8 +281,22 @@ class SubBasin:
             timeArray = np.zeros(len(matrixData))  # +1 as the time array is not one element more than the outlet in UH
             outFlow = np.zeros(len(matrixData),dtype=ct.c_double, order='F')
 
+            # Init the time properties if not already done
+            if self.dateBegin is None or self.dateEnd is None or self.deltaT == 0:
+                self.dateBegin = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]),
+                                                   hour=int(matrixData[0][3]), minute=int(matrixData[0][4]), second=int(matrixData[0][5]),
+                                                   microsecond=0, tzinfo=datetime.timezone.utc)
+                self.dateEnd = datetime.datetime(year=int(matrixData[-1][2]), month=int(matrixData[-1][1]), day=int(matrixData[-1][0]),
+                                                 hour=int(matrixData[-1][3]), minute=int(matrixData[-1][4]), second=int(matrixData[-1][5]),
+                                                 microsecond=0, tzinfo=datetime.timezone.utc)
+                self.deltaT = (datetime.datetime(year=int(matrixData[1][2]), month=int(matrixData[1][1]), day=int(matrixData[1][0]),
+                                                 hour=int(matrixData[1][3]), minute=int(matrixData[1][4]), second=int(matrixData[1][5]),
+                                                 microsecond=0, tzinfo=datetime.timezone.utc) - self.dateBegin).total_seconds()
+
             secondsInDay = 24*60*60
-            prevDate = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]),
+            prevDate = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]),
+                                         hour=int(matrixData[0][3]), minute=int(matrixData[0][4]), second=int(matrixData[0][5]),
+                                         microsecond=0, tzinfo=datetime.timezone.utc)
             prevDate -= tzDelta
             if(self.dateBegin!=prevDate):
                 print("ERROR: The first date in hydro data does not coincide with the one expected!")
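The block added above derives the missing time properties from the data matrix itself. A standalone sketch of that logic, using the column layout the diff indexes (day, month, year, hour, minute, second, value); the sample values are illustrative:

import datetime
import numpy as np

matrixData = np.array([[1, 7, 2021, 0, 0, 0, 12.3],
                       [1, 7, 2021, 1, 0, 0, 13.1],
                       [1, 7, 2021, 2, 0, 0, 14.0]])

def row_to_utc(row):
    # Same construction as in the diff: columns 2/1/0 are year/month/day, 3/4/5 are hour/minute/second.
    return datetime.datetime(year=int(row[2]), month=int(row[1]), day=int(row[0]),
                             hour=int(row[3]), minute=int(row[4]), second=int(row[5]),
                             tzinfo=datetime.timezone.utc)

dateBegin = row_to_utc(matrixData[0])                                 # first record
dateEnd   = row_to_utc(matrixData[-1])                                # last record
deltaT    = (row_to_utc(matrixData[1]) - dateBegin).total_seconds()   # spacing of the first two records
print(dateBegin, dateEnd, deltaT)                                     # hourly data -> deltaT == 3600.0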
@@ -298,9 +314,13 @@ class SubBasin:
             nbData = len(matrixData)
 
             for i in range(1,nbData):
-                currDate = datetime.datetime(year=int(matrixData[i][2]), month=int(matrixData[i][1]), day=int(matrixData[i][0]),
+                currDate = datetime.datetime(year=int(matrixData[i][2]), month=int(matrixData[i][1]), day=int(matrixData[i][0]),
+                                             hour=int(matrixData[i][3]), minute=int(matrixData[i][4]), second=int(matrixData[i][5]),
+                                             microsecond=0, tzinfo=datetime.timezone.utc)
                 currDate -= tzDelta
-                prevDate = datetime.datetime(year=int(matrixData[i-1][2]), month=int(matrixData[i-1][1]), day=int(matrixData[i-1][0]),
+                prevDate = datetime.datetime(year=int(matrixData[i-1][2]), month=int(matrixData[i-1][1]), day=int(matrixData[i-1][0]),
+                                             hour=int(matrixData[i-1][3]), minute=int(matrixData[i-1][4]), second=int(matrixData[i-1][5]),
+                                             microsecond=0, tzinfo=datetime.timezone.utc)
                 prevDate -= tzDelta
                 diffDate = currDate - prevDate
                 diffTimeInSeconds = diffDate.days*secondsInDay + diffDate.seconds
@@ -339,6 +359,9 @@ class SubBasin:
                     print("ERROR: the dates read are not consitent with the dates already recored in this subbasin!")
                     sys.exit()
 
+            if self._version<2022.0:
+                outFlow[:] = outFlow[:]/self.surfaceDrained*3.6
+
         elif(self.model==cst.tom_2layers_linIF or self.model==cst.tom_2layers_UH):
             # For this model, there are 3 different layers to read.
 
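The factor 3.6 in the backward-compatibility branch above is plain unit bookkeeping: 1 m³/s spread over 1 km² is 3600 m³/h over 10⁶ m², i.e. 3.6 mm/h. An illustrative check (the values are invented):

surfaceDrained = 125.0                  # [km^2]
q_m3s = 25.0                            # discharge [m^3/s]
q_mmh = q_m3s / surfaceDrained * 3.6    # 25/125*3.6 = 0.72 mm/h (conversion applied to pre-2022 files)
q_back = q_mmh * surfaceDrained / 3.6   # inverse used elsewhere in the file -> 25.0 m^3/s again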
@@ -394,6 +417,18 @@ class SubBasin:
             # Init of the outflow array
             outFlow = np.zeros((len(matrixData),2),dtype=ct.c_double, order='F')
 
+            # Init the time properties if not already done
+            if self.dateBegin is None or self.dateEnd is None or self.deltaT == 0:
+                self.dateBegin = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]),
+                                                   hour=int(matrixData[0][3]), minute=int(matrixData[0][4]), second=int(matrixData[0][5]),
+                                                   microsecond=0, tzinfo=datetime.timezone.utc)
+                self.dateEnd = datetime.datetime(year=int(matrixData[-1][2]), month=int(matrixData[-1][1]), day=int(matrixData[-1][0]),
+                                                 hour=int(matrixData[-1][3]), minute=int(matrixData[-1][4]), second=int(matrixData[-1][5]),
+                                                 microsecond=0, tzinfo=datetime.timezone.utc)
+                self.deltaT = (datetime.datetime(year=int(matrixData[1][2]), month=int(matrixData[1][1]), day=int(matrixData[1][0]),
+                                                 hour=int(matrixData[1][3]), minute=int(matrixData[1][4]), second=int(matrixData[1][5]),
+                                                 microsecond=0, tzinfo=datetime.timezone.utc) - self.dateBegin).total_seconds()
+
             secondsInDay = 24*60*60
             prevDate = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]), hour=int(matrixData[0][3]), minute=int(matrixData[0][4]), second=int(matrixData[0][5]), microsecond=0, tzinfo=datetime.timezone.utc)
             prevDate -= tzDelta
@@ -410,9 +445,13 @@ class SubBasin:
             outFlow[0][0] = matrixData[0][6]
             for i in range(1,len(matrixData)):
 
-                currDate = datetime.datetime(year=int(matrixData[i][2]), month=int(matrixData[i][1]), day=int(matrixData[i][0]),
+                currDate = datetime.datetime(year=int(matrixData[i][2]), month=int(matrixData[i][1]), day=int(matrixData[i][0]),
+                                             hour=int(matrixData[i][3]), minute=int(matrixData[i][4]), second=int(matrixData[i][5]),
+                                             microsecond=0, tzinfo=datetime.timezone.utc)
                 currDate -= tzDelta
-                prevDate = datetime.datetime(year=int(matrixData[i-1][2]), month=int(matrixData[i-1][1]), day=int(matrixData[i-1][0]),
+                prevDate = datetime.datetime(year=int(matrixData[i-1][2]), month=int(matrixData[i-1][1]), day=int(matrixData[i-1][0]),
+                                             hour=int(matrixData[i-1][3]), minute=int(matrixData[i-1][4]), second=int(matrixData[i-1][5]),
+                                             microsecond=0, tzinfo=datetime.timezone.utc)
                 prevDate -= tzDelta
                 diffDate = currDate - prevDate
                 diffTimeInSeconds = diffDate.days*secondsInDay + diffDate.seconds
@@ -511,7 +550,6 @@ class SubBasin:
             for i in range(1,len(matrixData)):
                 outFlow[i][1] = matrixData[i][6]
 
-
         elif(self.model==cst.tom_VHM):
             # For this model, there are 3 different layers to read.
 
@@ -539,6 +577,19 @@ class SubBasin:
                 i += 1
             matrixData = np.array(list_data).astype("float")
             timeArray = np.zeros(len(matrixData))
+
+            # Init the time properties if not already done
+            if self.dateBegin is None or self.dateEnd is None or self.deltaT == 0:
+                self.dateBegin = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]),
+                                                   hour=int(matrixData[0][3]), minute=int(matrixData[0][4]), second=int(matrixData[0][5]),
+                                                   microsecond=0, tzinfo=datetime.timezone.utc)
+                self.dateEnd = datetime.datetime(year=int(matrixData[-1][2]), month=int(matrixData[-1][1]), day=int(matrixData[-1][0]),
+                                                 hour=int(matrixData[-1][3]), minute=int(matrixData[-1][4]), second=int(matrixData[-1][5]),
+                                                 microsecond=0, tzinfo=datetime.timezone.utc)
+                self.deltaT = (datetime.datetime(year=int(matrixData[1][2]), month=int(matrixData[1][1]), day=int(matrixData[1][0]),
+                                                 hour=int(matrixData[1][3]), minute=int(matrixData[1][4]), second=int(matrixData[1][5]),
+                                                 microsecond=0, tzinfo=datetime.timezone.utc) - self.dateBegin).total_seconds()
+
             # Init of the outflow array
             outFlow = np.zeros((len(matrixData),3),dtype=ct.c_double, order='F')
 
@@ -635,7 +686,6 @@ class SubBasin:
                 # outFlow[i][2] = matrixData[i][6]*self.surfaceDrained/3.6
                 outFlow[i][2] = matrixData[i][6]
 
-
         elif(self.model==cst.tom_GR4):
             # For this model, there is only 1 output to consider.
 
@@ -669,6 +719,18 @@ class SubBasin:
             # Init of the outflow array
             outFlow = np.zeros((len(matrixData),1),dtype=ct.c_double, order='F')
 
+            # Init the time properties if not already done
+            if self.dateBegin is None or self.dateEnd is None or self.deltaT == 0:
+                self.dateBegin = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]),
+                                                   hour=int(matrixData[0][3]), minute=int(matrixData[0][4]), second=int(matrixData[0][5]),
+                                                   microsecond=0, tzinfo=datetime.timezone.utc)
+                self.dateEnd = datetime.datetime(year=int(matrixData[-1][2]), month=int(matrixData[-1][1]), day=int(matrixData[-1][0]),
+                                                 hour=int(matrixData[-1][3]), minute=int(matrixData[-1][4]), second=int(matrixData[-1][5]),
+                                                 microsecond=0, tzinfo=datetime.timezone.utc)
+                self.deltaT = (datetime.datetime(year=int(matrixData[1][2]), month=int(matrixData[1][1]), day=int(matrixData[1][0]),
+                                                 hour=int(matrixData[1][3]), minute=int(matrixData[1][4]), second=int(matrixData[1][5]),
+                                                 microsecond=0, tzinfo=datetime.timezone.utc) - self.dateBegin).total_seconds()
+
             secondsInDay = 24*60*60
             prevDate = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]), hour=int(matrixData[0][3]), minute=int(matrixData[0][4]), second=int(matrixData[0][5]), microsecond=0, tzinfo=datetime.timezone.utc)
             prevDate -= tzDelta
@@ -681,9 +743,13 @@ class SubBasin:
             # outFlow[0][0] = matrixData[0][6]*self.surfaceDrained/3.6
             outFlow[0][0] = matrixData[0][6]
             for i in range(1,len(matrixData)):
-                currDate = datetime.datetime(year=int(matrixData[i][2]), month=int(matrixData[i][1]), day=int(matrixData[i][0]),
+                currDate = datetime.datetime(year=int(matrixData[i][2]), month=int(matrixData[i][1]), day=int(matrixData[i][0]),
+                                             hour=int(matrixData[i][3]), minute=int(matrixData[i][4]), second=int(matrixData[i][5]),
+                                             microsecond=0, tzinfo=datetime.timezone.utc)
                 currDate -= tzDelta
-                prevDate = datetime.datetime(year=int(matrixData[i-1][2]), month=int(matrixData[i-1][1]), day=int(matrixData[i-1][0]),
+                prevDate = datetime.datetime(year=int(matrixData[i-1][2]), month=int(matrixData[i-1][1]), day=int(matrixData[i-1][0]),
+                                             hour=int(matrixData[i-1][3]), minute=int(matrixData[i-1][4]), second=int(matrixData[i-1][5]),
+                                             microsecond=0, tzinfo=datetime.timezone.utc)
                 prevDate -= tzDelta
                 diffDate = currDate - prevDate
                 diffTimeInSeconds = diffDate.days*secondsInDay + diffDate.seconds
@@ -716,7 +782,6 @@ class SubBasin:
                     print("ERROR: the dates read are not consitent with the dates already recored in this subbasin!")
                     sys.exit()
 
-
         elif(self.model==cst.measures):
             print("Reading the measurements outlet file...")
             if(type(fileNames)!=str):
@@ -736,7 +801,25 @@ class SubBasin:
                 i += 1
 
             matrixData = np.array(list_data).astype("float")
-            # Init
+            # Init the time properties if not already done
+            if self.dateBegin is None or self.dateEnd is None or self.deltaT == 0:
+                if nbCl==5:
+                    self.dateBegin = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]), hour=int(matrixData[0][3]), tzinfo=datetime.timezone.utc)
+                    self.dateEnd = datetime.datetime(year=int(matrixData[-1][2]), month=int(matrixData[-1][1]), day=int(matrixData[-1][0]), hour=int(matrixData[-1][3]), tzinfo=datetime.timezone.utc)
+                    self.deltaT = (datetime.datetime(year=int(matrixData[1][2]), month=int(matrixData[1][1]), day=int(matrixData[1][0]), hour=int(matrixData[1][3]), tzinfo=datetime.timezone.utc)
+                                   - self.dateBegin).total_seconds()
+                if nbCl==7:
+                    self.dateBegin = datetime.datetime(year=int(matrixData[0][2]), month=int(matrixData[0][1]), day=int(matrixData[0][0]),
+                                                       hour=int(matrixData[0][3]), minute=int(matrixData[0][4]), second=int(matrixData[0][5]),
+                                                       microsecond=0, tzinfo=datetime.timezone.utc)
+                    self.dateEnd = datetime.datetime(year=int(matrixData[-1][2]), month=int(matrixData[-1][1]), day=int(matrixData[-1][0]),
+                                                     hour=int(matrixData[-1][3]), minute=int(matrixData[-1][4]), second=int(matrixData[-1][5]),
+                                                     microsecond=0, tzinfo=datetime.timezone.utc)
+                    self.deltaT = (datetime.datetime(year=int(matrixData[1][2]), month=int(matrixData[1][1]), day=int(matrixData[1][0]),
+                                                     hour=int(matrixData[1][3]), minute=int(matrixData[1][4]), second=int(matrixData[1][5]),
+                                                     microsecond=0, tzinfo=datetime.timezone.utc) - self.dateBegin).total_seconds()
+
+            # Init of the outflow array
             timeInterval = self.dateEnd-self.dateBegin+datetime.timedelta(seconds=self.deltaT)
             outFlow = np.zeros(int(timeInterval.total_seconds()/self.deltaT),dtype=ct.c_double, order='F')
             timeArray = np.zeros(int(timeInterval.total_seconds()/self.deltaT))
@@ -816,7 +899,6 @@ class SubBasin:
                     print("ERROR: the dates read are not consitent with the dates already recored in this subbasin!")
                     sys.exit()
 
-
         elif(self.model==cst.compare_opti):
             print("Reading the measurements outlet file...")
             if(type(fileNames)!=str):
@@ -837,6 +919,13 @@ class SubBasin:
                 i += 1
 
             matrixData = np.array(list_data).astype("float")
+
+            # Init the time properties if not already done
+            if self.dateBegin is None or self.dateEnd is None or self.deltaT == 0:
+                self.dateBegin = datetime.datetime.fromtimestamp(matrixData[0][0], tz=datetime.timezone.utc)
+                self.dateEnd = datetime.datetime.fromtimestamp(matrixData[-1][0], tz=datetime.timezone.utc)
+                self.deltaT = matrixData[1][0]-matrixData[0][0]
+
             # Init of the outflow array
             timeInterval = self.dateEnd-self.dateBegin+datetime.timedelta(seconds=self.deltaT)
             # outFlow = np.zeros(int(timeInterval.total_seconds()/self.deltaT),dtype=ct.c_double, order='F')
@@ -1039,7 +1128,8 @@ class SubBasin:
         nameOutFlow = list(self._outFlow.items())[0][0]
         # Sum all the inlets hydrographs
         self.sum_inlets()
-        if(self.model==cst.tom_UH or self.model==cst.measures or
+        if(self.model==cst.tom_UH or self.model==cst.measures or
+           self.model==cst.tom_GR4 or self.model==cst.compare_opti):
             tmpHydro = np.zeros(len(self.myHydro),dtype=ct.c_double, order='F')
             if(self.model==cst.tom_GR4):
                 tmpHydro = self.myHydro[:,0]*self.surfaceDrained/3.6
@@ -2557,7 +2647,7 @@ class SubBasin:
         allInlets = []
 
         for element in self.intletsObj:
-            allInlets.append(self.intletsObj[element].name)
+            allInlets.append(str(self.intletsObj[element].name))
 
         return allInlets
 
@@ -2569,6 +2659,11 @@ class SubBasin:
             timeDelays = self.intletsObj[element].get_timeDelays(timeDelays)
 
         return timeDelays
+
+
+    def get_timeDelays_inlets(self):
+
+        return {el.name: el.timeDelay-self.timeDelay for el in self.intletsObj.values()}
 
 
     def get_surface_proportions(self, show=True):
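The new get_timeDelays_inlets() returns, for each inlet object, its time delay relative to the current sub-basin. A minimal usage sketch; only the method itself comes from the diff, the surrounding objects are assumed to have been linked by the catchment construction:

# Hypothetical: 'sb' is a SubBasin whose intletsObj dictionary is already populated.
relative_delays = sb.get_timeDelays_inlets()
for inlet_name, delay_s in relative_delays.items():
    print(f"{inlet_name}: {delay_s:+.0f} s relative to {sb.name}")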
@@ -2667,16 +2762,16 @@ class SubBasin:
             self.intletsObj[element].save_timeDelays()
 
 
-    def get_myHydro(self, unit:str="mm/h"):
+    def get_myHydro(self, unit:str="mm/h") -> np.ndarray:
 
-        if unit=="m3/s" or "m^3/s":
+        if unit=="m3/s" or unit=="m^3/s":
             if self.model == cst.measures:
                 # FIXME we consider so far that myHydro of a measures are in m^3/h
                 myHydro = self.myHydro
             elif self.surfaceDrained<=0.0:
                 logging.error("The surface drained is negative or equal to zero! myHydro will be given in mm/h!")
                 if len(np.shape(self.myHydro)) == 1:
-                    myHydro = self.myHydro
+                    myHydro = self.myHydro.copy()
                 else:
                     myHydro = np.sum(self.myHydro,1)
             else:
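The condition fixed in this hunk is the usual Python truthiness pitfall: a non-empty string literal is always truthy, so the old test could never fall through to the mm/h branch. A quick illustration:

unit = "mm/h"
print(unit == "m3/s" or "m^3/s")          # -> "m^3/s", which is truthy: the old check always passed
print(unit == "m3/s" or unit == "m^3/s")  # -> False: the corrected comparison behaves as intended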
@@ -2686,7 +2781,7 @@ class SubBasin:
                     myHydro = np.sum(self.myHydro,1)*self.surfaceDrained/3.6
         else:
             if len(np.shape(self.myHydro)) == 1:
-                myHydro = self.myHydro
+                myHydro = self.myHydro.copy()
             else:
                 myHydro = np.sum(self.myHydro,1)
 
@@ -2840,11 +2935,7 @@ class SubBasin:
 
     def get_outFlow_names(self)->list:
 
-
-        for key in self._outFlow:
-            names.append(key)
-
-        return names
+        return list(self._outFlow.keys())
 
 
     def change_version(self, newVersion=None):
@@ -2866,7 +2957,7 @@ class SubBasin:
 
     ## This procedure is updating all the hydrographs of all upstream elements imposing limits
     # @var level_min integer that specify the potential level at which the update should be stopped.
-    def update_upstream_hydro(self, level_min:int=1):
+    def update_upstream_hydro(self, level_min:int=1, update_upstream:bool=True):
 
         for key in self.intletsObj:
             curObj = self.intletsObj[key]
@@ -3013,14 +3104,506 @@ class SubBasin:
         return peak_s
 
 
+    def collect_x_from_production(self) -> dict[str,np.array]:
+        """
+        This procedure is collecting all the time series fractions of each outflow of the hydrological production models written in Fortran
+
+        Returns:
+            dict[str, np.array]: A dictionary containing the fractions of each outflow.
+        """
+        all_x = {}
+
+        if self.model == cst.tom_VHM:
+            all_x = self.collect_x_VHM()
+        elif self.model == cst.tom_GR4:
+            all_x = self.collect_x_GR4()
+        elif self.model == cst.tom_2layers_linIF:
+            all_x = self.collect_x_2layers()
+
+        return all_x
+
+    def collect_fractions(self) -> dict[str,np.array]:
+        """
+        This procedure is collecting all the fractions of each outflow of the hydrological production models.
+
+        Returns:
+            dict[str, np.array]: A dictionary containing the fractions of each outflow.
+        """
+        all_x = self.collect_x_from_production()
+
+        if self.model == cst.tom_VHM:
+            all_f = self._collect_fractions_VHM(all_x)
+        elif self.model == cst.tom_GR4:
+            all_f = self._collect_fractions_GR4(all_x)
+        elif self.model == cst.tom_2layers_linIF:
+            all_f = self._collect_fractions_2layers(all_x)
+
+        return all_f
+
+
+    def collect_all_internal_variables(self) -> dict[str,np.array]:
+        """
+        This procedure is collecting all internal variables of the hydrological production models.
+
+        Returns:
+            dict[str, np.array]: A dictionary containing the fractions of each outflow.
+        """
+        all_iv = {}
+
+        if self.model == cst.tom_VHM:
+            all_iv = self.collect_iv_VHM()
+        elif self.model == cst.tom_GR4:
+            all_iv = self.collect_iv_GR4()
+        elif self.model == cst.tom_2layers_linIF:
+            all_iv = self.collect_iv_2layers()
+
+        return all_iv
+
+
+    def activate_all_internal_variables(self):
+        """
+        This procedure is activating all internal variables of all the hydrological modules.
+        """
+        if self.model == cst.tom_VHM:
+            self.activate_all_iv_VHM()
+        elif self.model == cst.tom_GR4:
+            self.activate_all_iv_GR4()
+        elif self.model == cst.tom_2layers_linIF:
+            self.activate_all_iv_2layers()
+
+
+    def collect_x_VHM(self) -> dict[str,np.array]:
+        """
+        This procedure is collecting all the fractions of each outflow of the VHM model.
+
+        Returns:
+            - all_x: A dictionary containing the fractions of each outflow of the VHM model.
+        """
+        list_keys = ["x", "U"]
+        files_per_keys = [["xbf", "xif", "xof", "xu"],[]]
+        group = "Internal variables to save"
+        param = "simul_soil"
+
+        all_x = self.collect_internal_variables(list_keys, files_per_keys,
+                                                group_name=group, param_name=param)
+
+        return all_x
+
+
+    def _collect_fractions_VHM(self, all_x:dict[str,np.array]) -> dict[str,np.array]:
+        """
+        This procedure is collecting all the fractions of each outflow of the VHM model.
+
+        Returns:
+            - all_f: A dictionary containing the fractions of each outflow of the VHM model.
+        """
+        all_f = {}
+
+        if all_x=={}:
+            return all_f
+
+        condition = self.myRain > 0.0
+
+        all_f["% qof"] = np.where(condition, all_x["xof"] * 100.0, np.nan)
+        all_f["% qif"] = np.where(condition, all_x["xif"] * 100.0, np.nan)
+        all_f["% qbf"] = np.where(condition, all_x["xbf"] * 100.0, np.nan)
+        all_f["% loss"] = np.where(condition, all_x["xu"] * 100.0, np.nan)
+
+        return all_f
+
+
+    def collect_iv_VHM(self) -> dict[str,np.array]:
+        """
+        This procedure is collecting all internal variables of the VHM model in each module.
+
+        Returns:
+            - all_iv: A dictionary containing all internal variables of the VHM model.
+        """
+        list_keys = ["x", "U"]
+        files_per_keys = [[],["U"]]
+        group = "Internal variables to save"
+        param = "simul_soil"
+
+        all_iv = self.collect_internal_variables(list_keys, files_per_keys,
+                                                 group_name=group, param_name=param)
+
+        return all_iv
+
+
+    def activate_all_iv_VHM(self):
+        """
+        This procedure is activating all internal variables of the VHM model in each module.
+        """
+        list_keys = ["x", "U"]
+        group = "Internal variables to save"
+        param = "simul_soil"
+
+        self.activate_internal_variables(list_keys, group_name=group, param_name=param)
+
+
+    def collect_x_GR4(self) -> dict[str,np.array]:
+        """
+        This procedure is collecting the fractions of each outflow of the GR4 model.
+
+        Returns:
+            dict[str, np.array]: A dictionary containing all fractions of each outflow of the GR4 model.
+        """
+        all_x = {}
+
+        return all_x
+
+
+    def _collect_fractions_GR4(self, all_x:dict[str,np.array]) -> dict[str,np.array]:
+        """
+        This procedure is collecting all the fractions of each outflow of the GR4 model.
+
+        Returns:
+            - all_f: A dictionary containing the fractions of each outflow of the GR4 model.
+        """
+        all_f = {}
+
+        return all_f
+
 
-
-
+    def collect_iv_GR4(self) -> dict[str,np.array]:
+        """
+        This procedure is collecting all internal variables of the GR4 model in each module.
+
+        Returns:
+            - all_iv: A dictionary containing all internal variables of the GR4 model.
+        """
+        all_iv = {}
+
+        return all_iv
+
+
+    def activate_all_iv_GR4(self):
+        """
+        This procedure is activating all internal variables of the GR4 model in each module.
+        """
+        return
+
 
-
+    def collect_x_2layers(self) -> dict[str,np.array]:
+        """
+        This procedure is collecting the fractions of each outflow of the 2 layers model.
+
+        Returns:
+            A dictionary containing the collected fractions of each outflow variables.
+        """
+        list_keys = ["x", "U", "Reservoir"]
+        files_per_keys = [["xif"], [], ["xp"]]
+        group = "Internal variables to save"
+        param = "simul_soil"
+
+        all_x = self.collect_internal_variables(list_keys, files_per_keys,
+                                                group_name=group, param_name=param)
+
+        return all_x
+
+
+    def _collect_fractions_2layers(self, all_x:dict[str,np.array]) -> dict[str,np.array]:
+        """
+        This procedure is collecting all the fractions of each outflow of the 2 layers model.
+
+        Returns:
+            - all_f: A dictionary containing the fractions of each outflow of the 2 layers model.
+        """
+        all_f = {}
+
+        if all_x=={}:
+            return all_f
+
+        condition = self.myRain > 0.0
+
+        f_if = np.where(condition, all_x["xp"] * all_x["xif"], np.nan)
+
+        all_f["% qof"] = (all_x["xp"] - f_if) * 100.0
+        all_f["% qif"] = f_if * 100.0
+        all_f["% loss"] = np.where(condition, (1.0 - all_x["xp"]) * 100.0, np.nan)
+
+        return all_f
+
+
+    def collect_iv_2layers(self) -> dict[str,np.array]:
+        """
+        This procedure is collecting all internal variables of the 2 layers model in each module.
+
+        Returns:
+            - all_iv: A dictionary containing the fractions all internal variables of the 2 layers model.
+        """
+        list_keys = ["x", "U", "Reservoir"]
+        files_per_keys = [[], ["U"], ["S"]]
+        group = "Internal variables to save"
+        param = "simul_soil"
+
+        all_iv = self.collect_internal_variables(list_keys, files_per_keys,
+                                                 group_name=group, param_name=param)
+
+        return all_iv
+
+
+    def activate_all_iv_2layers(self):
+        """
+        This procedure is activating all internal variables of the 2 layers model in each module.
+        """
+        list_keys = ["x", "U", "Reservoir"]
+        group = "Internal variables to save"
+        param = "simul_soil"
+
+        self.activate_internal_variables(list_keys, group_name=group, param_name=param)
+
+
+    def collect_internal_variables(self, list_keys:list[str], files_per_keys:list[list[str]],
+                                   group_name:str="Internal variables to save", param_name:str="simul_soil"
+                                   )-> dict[str,np.array]:
+        """
+        Collects all the internal variables of the 2 layers model.
+
+        Parameters:
+            - list_keys (list[str]): List of keys representing the internal variables to collect.
+            - files_per_keys (list[list[str]]): List of lists containing the file names associated with each key.
+            - group_name (str, optional): Name of the group containing the internal variables to save. Default is "Internal variables to save".
+            - production_name (str, optional): Name of the production file. Default is "simul_soil".
+
+        Returns:
+            - dict[str,np.array]: A dictionary containing the collected internal variables, where the keys are the variable names and the values are numpy arrays.
+
+        """
+        all_iv = {}
+
+        production_file = ".".join([param_name,"param"])
+        cur_dir = os.path.join(self.fileNameRead, "Subbasin_"+str(self.iDSorted))
+        param_fileName = os.path.join(cur_dir, production_file)
+
+        wolf_soil = Wolf_Param(to_read=True, filename=param_fileName,toShow=False, init_GUI=False)
+
+        for index, curKey in enumerate(list_keys):
+            ok_IV = wolf_soil.get_param(group_name, curKey, default_value=0)
+            if ok_IV == 1:
+                for curVar in files_per_keys[index]:
+                    ts_file = "".join([param_name, "_", curVar, ".dat"])
+                    isOk, tmp = rd.check_path(os.path.join(cur_dir, ts_file))
+                    if isOk<0:
+                        logging.warning("The file : " + ts_file + " does not exist!")
+                        continue
+                    time, cur_iv = rd.read_hydro_file(cur_dir, ts_file)
+                    all_iv[curVar] = cur_iv
+            else:
+                logging.warning("Please activate the interval variable : " + curKey + "to have access the following fraction of outlets : ")
+
+        return all_iv
+
+
+    def activate_internal_variables(self, list_keys:list[str], group_name:str="Internal variables to save", param_name:str="simul_soil"):
+        """
+        Activates all the internal variables of the 2 layers model.
+
+        Parameters:
+            - list_keys (list[str]): List of keys representing the internal variables to collect.
+
+        """
+        production_file = ".".join([param_name,"param"])
+        cur_dir = os.path.join(self.fileNameRead, "Subbasin_"+str(self.iDSorted))
+        param_fileName = os.path.join(cur_dir, production_file)
+
+        wolf_soil = Wolf_Param(to_read=True, filename=param_fileName,toShow=False, init_GUI=False)
+
+        for curKey in list_keys:
+            wolf_soil.change_param(group_name, curKey, 1)
+
+        wolf_soil.SavetoFile(None)
+        wolf_soil.Reload(None)
+
+        return
+
+    def get_summary_fractions(self, summary:str="mean", all_f:dict={},
+                              interval:list[tuple[datetime.datetime, datetime.datetime]]=None) -> dict[str, np.array]:
+        """
+        This procedure is returning a summary of the fractions of the current module.
+
+        Parameters:
+            - summary (str): The type of summary to return.
+            - interval (list[datetime.datetime], optional): The interval of time to consider. Default is None.
+
+        Returns:
+            - dict: A dictionary containing the summary of the fractions of the current module.
+        """
+
+        if all_f == {}:
+            all_f = self.collect_fractions()
+
+        if interval is not None:
+            interv = np.zeros(len(self.time), dtype=bool)
+            for el in interval:
+                date_i = datetime.datetime.timestamp(el[0])
+                date_f = datetime.datetime.timestamp(el[1])
+                interv += (self.time>=date_i) & (self.time<=date_f)
+        else:
+            interv = np.ones(len(self.time), dtype=bool)
+
+        if summary == "mean":
+            return {key: np.nanmean(all_f[key], where=interv) for key in all_f}
+        elif summary == "median":
+            return {key: np.nanmedian(all_f[key][interv]) for key in all_f}
+        elif summary == "std":
+            return {key: np.nanstd(all_f[key][interv]) for key in all_f}
+        elif summary == "min":
+            return {key: np.nanmin(all_f[key], where=interv) for key in all_f}
+        elif summary == "max":
+            return {key: np.nanmax(all_f[key], where=interv) for key in all_f}
+        else:
+            logging.error("The summary type is not recognised!")
+            return []
+
+
+
+    def check_presence_of_iv(self):
+        """
+        This procedure is checking the presence of internal variables in the current module.
+        """
+        # TODO
+
+        return
+
+
+    def get_all_Qtest(self, nb_atttempts=-1, typeOutFlow:str="Net", unit:str='m3/s', whichOutFlow="", lag:float=0.0) -> np.array:
+        """
+        This function returns the Qtest hydrograph of the current module.
+
+        Parameters:
+            - which (str, optional): The type of hydrograph to return. Default is "Net".
+
+        Returns:
+            - np.array: The Qtest hydrograph of the current module.
+        """
+
+        # FIXME Take into account all the possible types of hydrographs and units
+        file_debug_info = "simul_GR4_out_debuginfo.txt"
+        prefix = "simul_GR4_out"
+
+        working_dir = os.path.join(self.fileNameRead, 'Subbasin_' + str(self.iDSorted) + '/')
+
+        q_test = []
+
+        file_debug_info = "_".join([prefix,"_debuginfo.txt"])
+        if nb_atttempts < 0:
+            with open(os.path.join(working_dir,file_debug_info), 'r') as file:
+                lines = file.readline()
+                items = lines.split('\t')
+                nb_init = int(items[0])
+                nb_max = int(items[0])
+                nb_atttempts = nb_max
+
+        all_files = [os.path.join(working_dir,"".join([prefix,str(i+1)+".dat"])) for i in range(nb_atttempts)]
+        areOk = [(rd.check_path(file)[0])>=0
+                 for file in all_files]
+        max_index = next((i for i, x in enumerate(areOk) if x == False), len(areOk))
+        q_test = [rd.read_hydro_file(working_dir, file_name)[1]*self.surfaceDrained/3.6
+                  for file_name in all_files[:max_index]]
+
+
+        # for i in range(nb_atttempts):
+        #     file_name = "".join([prefix,str(i+1)+".dat"])
+        #     isOk, full_name = rd.check_path(os.path.join(working_dir, file_name))
+        #     if isOk<0:
+        #         break
+        #     t, cur_q = rd.read_hydro_file(working_dir, file_name)
+        #     cur_q = cur_q*self.surfaceDrained/3.6
+        #     q_test.append(cur_q)
+
+        return q_test
+
+    def plot_all_fractions(self, all_fractions:dict[str:np.array]={},figure=None, to_show:bool=False, writeDir:str="", range_data:list[datetime.datetime]=[]) -> None:
+
+        if writeDir == "":
+            writeFile = os.path.join(self.fileNameWrite, "PostProcess", "_".join(["Q_fractions", self.name]))
+        else:
+            writeFile = os.path.join(writeDir, "_".join(["Q_fractions", self.name]))
+
+        if all_fractions == {}:
+            all_fractions = self.collect_fractions()
+            if all_fractions == {}:
+                logging.warning("No fractions found!")
+                return
+        elif self.name in all_fractions:
+            all_fractions = all_fractions[self.name]
+        else:
+            all_fractions = {}
+            logging.warning("The name of the current module is not in the dictionary of fractions!")
+
+        nb_elements = len(all_fractions)
+        if nb_elements == 0:
+            logging.warning("No fractions found!")
+            return
+        y = [el for el in all_fractions.values()]
+        x = [el for el in all_fractions.keys()]
+        y_label = "Fractions of the outflows [%]"
+        graph_title = "Fractions of the outflows of " + self.name
+
+        ph.plot_hydro(nb_elements, y,time=self.time, y_titles=y_label, y_labels=x, writeFile=writeFile, figure=figure, graph_title=graph_title, rangeData=range_data)
+
+        if to_show:
+            plt.show()
+
+
+    def evaluate_objective_function(self, unit="mm/h")->np.ndarray:
+        """
+        This procedure is evaluating the objective function of the current module.
+
+        Returns:
+            - np.ndarray: The objective function of the current module.
+        """
+        # FIXME
+        unit='mm/h'
+
+        return self.get_outFlow(unit=unit)
+
+
+
+
+    def import_from_pandas_Series(self, data:pd.Series, which="outFlow"):
+        time = data.index.values.astype(np.int64) // 10 ** 9
+
+        if which == "outFlow":
+            self._outFlow["Net"] = data.values.astype(dtype=ct.c_double, order='F')
+        elif which == "outFlowRaw":
+            self._outFlow["Raw"] = data.values.astype(dtype=ct.c_double, order='F')
+        elif which == "myHydro":
+            self.myHydro = data.values.astype(dtype=ct.c_double, order='F')
+        elif which == "myRain":
+            data = data.values.astype(dtype=np.double, order='F')
+        elif which == "cumul_rain":
+            data = data.values.astype(dtype=np.double, order='F')
+        else:
+            logging.error("Not a recognised 'which' argument!")
+            logging.error("Try the following : 'ouflow', 'outFlowRaw', 'myHydro', 'myRain', 'cumul_rain'")
+
+        return
+
+    def export_to_pandas_Series(self, which="outFlow"):
+        idx = pd.to_datetime(self.time, unit='s', utc=True)
+
+        if which == "outFlow":
+            data = self.outFlow
+        elif which == "outFlowRaw":
+            data = self.outFlowRaw
+        elif which == "myHydro":
+            data = self.myHydro
+        elif which == "myRain":
+            data = self.myRain
+        elif which == "cumul_rain":
+            data = self.cumul_rain
+        else:
+            logging.error("Not a recognised 'which' argument!")
+            logging.error("Try the following : 'ouflow', 'outFlowRaw', 'myHydro', 'myRain', 'cumul_rain'")
+            return None
+
+        tserie = pd.Series(data, index=idx, copy=True, name=" ".join([self.name,which]))
 
+        return tserie
 
 
+    # def plot_Nash_vs_Qexcess(self, figure:plt.axis=None, toShow:bool=False, writeFile:str=""):
 
 
 
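Taken together, the methods added in this last hunk outline a post-processing workflow: activate the internal-variable output, collect the outflow fractions written by the Fortran production models, summarise or plot them, and exchange series with pandas. A usage sketch under assumptions (an existing SubBasin 'sb' with a completed simulation; the Fortran model presumably has to be re-run after activation before the time-series files exist):

import datetime

sb.activate_all_internal_variables()      # sets the "Internal variables to save" flags in simul_soil.param
fractions = sb.collect_fractions()        # e.g. keys "% qof", "% qif", "% qbf", "% loss" for a VHM sub-basin

july = [(datetime.datetime(2021, 7, 1, tzinfo=datetime.timezone.utc),
         datetime.datetime(2021, 7, 31, tzinfo=datetime.timezone.utc))]
means = sb.get_summary_fractions(summary="mean", all_f=fractions, interval=july)

sb.plot_all_fractions(all_fractions={sb.name: fractions}, to_show=True)

outflow_series = sb.export_to_pandas_Series(which="outFlow")   # UTC-indexed pandas Series of the outflow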