wolfhece 2.2.8__py3-none-any.whl → 2.2.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wolfhece/PyDraw.py +94 -24
- wolfhece/PyGui.py +1 -0
- wolfhece/PyVertex.py +127 -19
- wolfhece/PyVertexvectors.py +73 -21
- wolfhece/__init__.py +5 -2
- wolfhece/apps/version.py +1 -1
- wolfhece/hydrology/Internal_variables.py +283 -0
- wolfhece/hydrology/Models_characteristics.py +223 -0
- wolfhece/hydrology/Optimisation.py +324 -14
- wolfhece/hydrology/SubBasin.py +112 -28
- wolfhece/hydrology/cst_exchanges.py +1 -0
- wolfhece/hydrometry/kiwis.py +8 -3
- wolfhece/lagrangian/particle_system_ui.py +1 -1
- wolfhece/lazviewer/processing/estimate_normals/estimate_normals.cp311-win_amd64.pyd +0 -0
- wolfhece/lazviewer/vfuncsdir/vfuncs.cp311-win_amd64.pyd +0 -0
- wolfhece/lazviewer/viewer/viewer.exe +0 -0
- wolfhece/lazviewer/viewer/viewer_310.exe +0 -0
- wolfhece/libs/WolfDll.dll +0 -0
- wolfhece/libs/get_infos.cp311-win_amd64.pyd +0 -0
- wolfhece/libs/verify_wolf.cp311-win_amd64.pyd +0 -0
- wolfhece/libs/wolfogl.cp311-win_amd64.pyd +0 -0
- wolfhece/pydike.py +1 -1
- wolfhece/pyviews.py +1 -1
- wolfhece/wolf_array.py +28 -6
- wolfhece-2.2.10.dist-info/METADATA +90 -0
- {wolfhece-2.2.8.dist-info → wolfhece-2.2.10.dist-info}/RECORD +33 -21
- {wolfhece-2.2.8.dist-info → wolfhece-2.2.10.dist-info}/WHEEL +1 -1
- {wolfhece-2.2.8.dist-info → wolfhece-2.2.10.dist-info}/entry_points.txt +0 -0
- {wolfhece-2.2.8.dist-info → wolfhece-2.2.10.dist-info}/top_level.txt +0 -0
wolfhece/hydrology/Models_characteristics.py (new file)
@@ -0,0 +1,223 @@
+from . import constant as cst
+from . import cst_exchanges as cste
+from . import Internal_variables as iv
+
+VHM_VAR = iv.Group_to_Activate(
+    name="VHM",
+    all_params=[
+        iv.Param_to_Activate(
+            key="x", group="Internal variables to save", file="soil",
+            all_variables=[
+                iv.Internal_Variable(name=f"%xu", file="xu", type_of_var=iv.FRAC_VAR, linked_param=None),
+                iv.Internal_Variable(name=f"%xof", file="xof", type_of_var=iv.FRAC_VAR, linked_param=None),
+                iv.Internal_Variable(name=f"%xif", file="xif", type_of_var=iv.FRAC_VAR, linked_param=None),
+                iv.Internal_Variable(name=f"%xbf", file="xbf", type_of_var=iv.FRAC_VAR, linked_param=None),
+            ]),
+        iv.Param_to_Activate(
+            key="U", group="Internal variables to save", file="soil",
+            all_variables=[
+                iv.Internal_Variable(name="U", file="U", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_VHM_Umax)
+            ]),
+        iv.Param_to_Activate(
+            key=None, group=None, file="",
+            all_variables=[
+                iv.Internal_Variable(name="q_of", file="of", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_if", file="if", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_bf", file="bf", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+            ])
+    ])
+
+
+UHDIST_LINBF_VAR = iv.Group_to_Activate(
+    name="2 layers",
+    all_params=[
+        iv.Param_to_Activate(
+            key="x", group="Internal variables to save", file="soil",
+            all_variables=[
+                iv.Internal_Variable(name=f"% xif", file="x", type_of_var=iv.FRAC_VAR, linked_param=None)
+            ]),
+        iv.Param_to_Activate(
+            key="U", group="Internal variables to save", file="soil",
+            all_variables=[
+                iv.Internal_Variable(name=f"%U", file="U", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_Dist_Soil_Umax)
+            ]),
+        iv.Param_to_Activate(
+            key="Reservoir", group="Internal variables to save", file="soil",
+            all_variables=[
+                iv.Internal_Variable(name=f"% xp", file="xp", type_of_var=iv.FRAC_VAR, linked_param=None),
+                iv.Internal_Variable(name=f"S", file="S", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_Dist_RS_Hs)
+            ]),
+        iv.Param_to_Activate(
+            key=None, group=None, file="",
+            all_variables=[
+                iv.Internal_Variable(name="q_of", file="of", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_if", file="if", type_of_var=iv.DEFAULT_VAR, linked_param=None)
+            ])
+    ]
+)
+
+
+HBV_VAR = iv.Group_to_Activate(
+    name="HBV",
+    all_params=[
+        iv.Param_to_Activate(
+            key="U", group="Internal variables to save", file="soil",
+            all_variables=[
+                iv.Internal_Variable(name="U", file="U", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_HBV_FC),
+            ]),
+        iv.Param_to_Activate(
+            key="Q out", group="Internal variables to save", file="soil",
+            all_variables=[
+                iv.Internal_Variable(name="q recharge", file="qrech", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q capillary", file="qcap", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="Evapotranspiration", file="etr", type_of_var=iv.OUT_VAR, linked_param=None)
+            ]),
+        iv.Param_to_Activate(
+            key="Su", group="Internal variables to save", file="UZ",
+            all_variables=[
+                iv.Internal_Variable(name="Su", file="Su", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_HBV_SUmax)
+            ]),
+        iv.Param_to_Activate(
+            key="Q out", group="Internal variables to save", file="UZ",
+            all_variables=[
+                iv.Internal_Variable(name="q_of", file="qr", type_of_var=iv.FINAL_OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_if", file="qif", type_of_var=iv.FINAL_OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q percolation", file="qperc", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q cap UZ", file="qcap", type_of_var=iv.OUT_VAR, linked_param=None)
+            ]),
+        iv.Param_to_Activate(
+            key=None, group=None, file="",
+            all_variables=[
+                iv.Internal_Variable(name="q_bf", file="bf", type_of_var=iv.DEFAULT_VAR, linked_param=None)
+            ])
+    ]
+)
+
+SACSMA_VAR = iv.Group_to_Activate(
+    name="SAC-SMA",
+    all_params=[
+        iv.Param_to_Activate(
+            key="IV", group="Internal variables to save", file="UZ",
+            all_variables=[
+                iv.Internal_Variable(name="C_UZ_TW", file="Ctw", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_SAC_M_UZ_TW),
+                iv.Internal_Variable(name="C_UZ_FW", file="Cfw", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_SAC_M_UZ_FW),
+                iv.Internal_Variable(name="C_Adimp", file="Cadimp", type_of_var=iv.IV_VAR, linked_param=None)
+            ]),
+        iv.Param_to_Activate(
+            key="Q out", group="Internal variables to save", file="UZ",
+            all_variables=[
+                iv.Internal_Variable(name="E1", file="e1", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="E2", file="e2", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="E5", file="e5", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_ft", file="qft", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_tf", file="qtf", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_if", file="qif", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_perc", file="qperc", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_sr", file="qsr", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_in Adimp", file="qinadimp", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_dr Adimp", file="qdr", type_of_var=iv.OUT_VAR, linked_param=None)
+            ]),
+        iv.Param_to_Activate(
+            key="IV", group="Internal variables to save", file="LZ",
+            all_variables=[
+                iv.Internal_Variable(name="C_LZ_TW", file="Ctw", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_SAC_M_LZ_TW),
+                iv.Internal_Variable(name="C_LZ_FP", file="Cfp", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_SAC_M_LZ_FP),
+                iv.Internal_Variable(name="C_LZ_FS", file="Cfs", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_SAC_M_LZ_FS)
+            ]),
+        iv.Param_to_Activate(
+            key="Q out", group="Internal variables to save", file="LZ",
+            all_variables=[
+                iv.Internal_Variable(name="E3", file="e3", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_fp", file="qfp", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_fs", file="qfs", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_in tw", file="qintw", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_in fp", file="qinfp", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_in fs", file="qinfs", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_out tw", file="qouttw", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_out fp", file="qoutfp", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_out fs", file="qoutfs", type_of_var=iv.OUT_VAR, linked_param=None),
+            ]),
+        iv.Param_to_Activate(
+            key=None, group=None, file="out",
+            all_variables=[
+                iv.Internal_Variable(name="E_tot", file="Etot", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="Q_of", file="Qof", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="Q_if", file="Qif", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="Q_bf", file="Qbf", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="Q_subbf", file="Qsubbf", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="Q_surf", file="Qsurf", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="Q_base", file="Qbase", type_of_var=iv.DEFAULT_VAR, linked_param=None)
+            ])
+    ]
+)
+
+NAM_VAR = iv.Group_to_Activate(
+    name="NAM",
+    all_params=[
+        iv.Param_to_Activate(
+            key="U", group="Internal variables to save", file="SS",
+            all_variables=[
+                iv.Internal_Variable(name="U", file="U", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_NAM_UMAX),
+            ]),
+        iv.Param_to_Activate(
+            key="Q out", group="Internal variables to save", file="SS",
+            all_variables=[
+                iv.Internal_Variable(name="qqof", file="qof", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="qqif", file="qif", type_of_var=iv.OUT_VAR, linked_param=None),
+                # iv.Internal_Variable(name="q_infil", file="qinfil", type_of_var=iv.OUT_VAR),
+                iv.Internal_Variable(name="Ea", file="ea", type_of_var=iv.OUT_VAR, linked_param=None),
+            ]),
+        iv.Param_to_Activate(
+            key="IV", group="Internal variables to save", file="RZ",
+            all_variables=[
+                iv.Internal_Variable(name="L", file="L", type_of_var=iv.IV_VAR,
+                                     linked_param=cste.exchange_parameters_NAM_LMAX),
+            ]),
+        iv.Param_to_Activate(
+            key="Q out", group="Internal variables to save", file="RZ",
+            all_variables=[
+                iv.Internal_Variable(name="E_rz", file="erz", type_of_var=iv.OUT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_g", file="qg", type_of_var=iv.OUT_VAR, linked_param=None),
+            ]),
+        iv.Param_to_Activate(
+            key=None, group=None, file="",
+            all_variables=[
+                iv.Internal_Variable(name="q_of", file="OF", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_if", file="IF", type_of_var=iv.DEFAULT_VAR, linked_param=None),
+                iv.Internal_Variable(name="q_bf", file="BF", type_of_var=iv.DEFAULT_VAR, linked_param=None)
+            ])
+    ]
+)
+
+
+MODELS_VAR:dict[int, iv.Group_to_Activate] = {
+    cst.tom_VHM: VHM_VAR,
+    cst.tom_2layers_linIF: UHDIST_LINBF_VAR,
+    cst.tom_HBV: HBV_VAR,
+    cst.tom_SAC_SMA: SACSMA_VAR,
+    cst.tom_NAM: NAM_VAR
+}
+
+if __name__ == "__main__":
+    print(f"VHM keys: {VHM_VAR.get_keys()}")
+    print(f"UHDIST_LINBF keys: {UHDIST_LINBF_VAR.get_keys()}")
+    print(f"HBV keys: {HBV_VAR.get_keys()}")
+    print(f"SACSMA keys: {SACSMA_VAR.get_keys()}")
+    print(f"NAM keys: {NAM_VAR.get_keys()}")
+
+    print(f"VHM files: {VHM_VAR.get_files_per_keys()}")
+    print(f"UHDIST_LINBF files: {UHDIST_LINBF_VAR.get_files_per_keys()}")
+    print(f"HBV files: {HBV_VAR.get_files_per_keys()}")
+    print(f"SACSMA files: {SACSMA_VAR.get_files_per_keys()}")
+    print(f"NAM files: {NAM_VAR.get_files_per_keys()}")
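The new MODELS_VAR dictionary maps each hydrological model constant to the Group_to_Activate object describing its internal variables. Below is a minimal sketch (not part of the diff) of how this registry can be queried, assuming the Group_to_Activate helpers shown in this release (get_keys, get_files_per_keys, get_all_linked_params) behave as their names suggest:

    # Minimal usage sketch, assuming wolfhece 2.2.10 with the new hydrology modules above.
    from wolfhece.hydrology import constant as cst
    from wolfhece.hydrology import Models_characteristics as mc

    vhm_group = mc.MODELS_VAR[cst.tom_VHM]       # Group_to_Activate for the VHM model
    print(vhm_group.name)                        # "VHM"
    print(vhm_group.get_keys())                  # parameter keys to activate per group
    print(vhm_group.get_files_per_keys())        # target files for each key
    print(vhm_group.get_all_linked_params())     # internal variable name -> exchange parameter id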
wolfhece/hydrology/Optimisation.py
@@ -27,6 +27,8 @@ from ..wolf_array import *
 from ..PyGui import GenMapManager,HydrologyModel
 from . import cst_exchanges as cste
 from . import constant as cst
+from . import Models_characteristics as mc
+from . import Internal_variables as iv
 from ..PyTranslate import _
 import traceback
 
@@ -228,7 +230,12 @@ class Optimisation(wx.Frame):
         self.Bind(wx.EVT_MENU, self.test_equifinality_with_Nash, testEquiFinClick)
         plotEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Plot equifinality with Nash')
         self.Bind(wx.EVT_MENU, self.plot_equifinality, plotEquiFinClick)
-
+        testEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Models analysis with Nash')
+        self.Bind(wx.EVT_MENU, self.launch_models_propertie_with_Nash, testEquiFinClick)
+        plotEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Plot analysis with Nash')
+        self.Bind(wx.EVT_MENU, self.plot_model_analysis, plotEquiFinClick)
+
+
 
         # Creation of the Lauch Menu
         launchMenu = wx.Menu()
@@ -1990,24 +1997,31 @@ class Optimisation(wx.Frame):
                 if cur_sub.iDSorted != refCatch.myEffSortSubBasins[cur_effsub]:
                     continue
                 self.myParams[i+1]["value"] = params[i]
-
-                    convFact = myModelDict[int(myType)]["Convertion Factor"]
-                else:
-                    convFact = 1.0
+
                 all_files = myModelDict[int(myType)]["File"]
                 if type(all_files) is not list:
+                    # Extract the unit conversion factor
+                    if "Convertion Factor" in myModelDict[int(myType)]:
+                        convFact = myModelDict[int(myType)]["Convertion Factor"]
+                    else:
+                        convFact = 1.0
                     fileName = myModelDict[int(myType)]["File"]
                     myGroup = myModelDict[int(myType)]["Group"]
                     myKey = myModelDict[int(myType)]["Key"]
                     self.write_one_opti_param(filePath, fileName, myGroup, myKey, params[i], convers_factor=convFact)
                 else:
+                    # Extract the unit conversion factor in a list which is the same size as the number of files
+                    if "Convertion Factor" in myModelDict[int(myType)]:
+                        convFact = myModelDict[int(myType)]["Convertion Factor"]
+                    else:
+                        convFact = [1.0]*len(all_files)
+                    # Iterate over all the files to fill for one parameter
                     for iFile in range(len(all_files)):
                         fileName = all_files[iFile]
                         myGroup = myModelDict[int(myType)]["Group"][iFile]
                         myKey = myModelDict[int(myType)]["Key"][iFile]
-                        self.write_one_opti_param(filePath, fileName, myGroup, myKey, params[i], convers_factor=convFact)
-            else:
-
+                        self.write_one_opti_param(filePath, fileName, myGroup, myKey, params[i], convers_factor=convFact[iFile])
+            else:
                 self.curParams_vec_F[i] = params[i]
                 self.update_timeDelay(i+1)
                 refCatch.save_timeDelays([self.myParams[i+1]["junction_name"]])
@@ -2662,10 +2676,10 @@ class Optimisation(wx.Frame):
         logging.info("The equifinality test is finished!")
 
 
-    def get_best_params(self, stationOut:str,
-                        criterion:str="Nash", quantile:float=0.99, std:float=0.05, eps:float=0.
-                        objective_fct:bool= True, apply_clustering:bool=False):
-        from sklearn.cluster import DBSCAN
+    def get_best_params(self, stationOut:str,
+                        criterion:str="Nash", quantile:float=0.99, std:float=0.05, eps:float=0.2, rmv_near_max=1e-4, nb_rand_close:int=10,
+                        objective_fct:bool= True, apply_clustering:bool=False, objective_weight:float=1.0):
+        from sklearn.cluster import DBSCAN
         """
         Get the best parameters for a given station.
 
@@ -2685,12 +2699,26 @@ class Optimisation(wx.Frame):
 
         quantile_cond = (all_obj_fct > np.quantile(all_obj_fct, quantile))
         std_cond = (all_obj_fct > best_objfct*(1-std))
-
+        tooclose_cond = (all_obj_fct < best_objfct*(1-rmv_near_max)) | (all_obj_fct == best_objfct)
+        all_cond = np.where(quantile_cond & std_cond & tooclose_cond)[0]
         eff_params = all_params[all_cond]
         eff_obj = all_obj_fct[all_cond]
 
         if objective_fct:
             eff_params = np.column_stack((eff_params, eff_obj))
+            # Select randomly the parameters that are close to the best one
+            if nb_rand_close>0:
+                close_params = all_params[~tooclose_cond]
+                if np.shape(close_params)[0]>0:
+                    close_obj = all_obj_fct[~tooclose_cond]
+                    # random selection of the parameters that are close to the best one
+                    idx = np.random.choice(np.shape(close_params)[0], size=nb_rand_close, replace=False)
+                    selected_params = close_params[idx]
+                    selected_obj = close_obj[idx]
+                    tot_add_params = np.column_stack((selected_params, selected_obj))
+                    # Add the selected parameters to the eff_params
+                    eff_params = np.vstack((eff_params, tot_add_params))
+                    # add to the eff_params
 
         # In this part we filter abd remove the parameters that are almost equivalent
         # To do so, we use the DBSCAN clustering algorithm to group the parameters that are close to each other
@@ -2701,6 +2729,10 @@ class Optimisation(wx.Frame):
         min_param = np.min(eff_params, axis=0)
         max_param = np.max(eff_params, axis=0)
         norm_params = (eff_params-min_param)/(max_param-min_param)
+        # Add weight to the objective function to make it more important in the clustering
+        # FIXME : to be improved
+        norm_params[:,-1] = norm_params[:,-1]*objective_weight
+        # Apply the DBSCAN clustering algorithm to group the parameters
         db = DBSCAN(eps=eps).fit(norm_params)
         labels = db.labels_
         # Extraction of the number of groups and particular cases
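Taken together, the three hunks above change how near-optimal parameter sets are selected in get_best_params: sets within rmv_near_max of the best Nash value are excluded from the equifinality pool (except the best itself), up to nb_rand_close of them are re-injected at random, and the objective column is scaled by objective_weight before DBSCAN clustering removes near-duplicate sets. A standalone sketch of that selection logic on synthetic data (toy values, not the wolfhece implementation):

    # Toy sketch of the selection and clustering logic added above (synthetic data).
    import numpy as np
    from sklearn.cluster import DBSCAN

    rng = np.random.default_rng(0)
    all_params = rng.uniform(0.0, 1.0, size=(500, 3))   # 500 candidate parameter sets
    all_obj_fct = rng.uniform(0.5, 0.9, size=500)       # their Nash values
    best_objfct = all_obj_fct.max()

    quantile, std, rmv_near_max, objective_weight, eps = 0.99, 0.05, 1e-4, 1.0, 0.2

    quantile_cond = all_obj_fct > np.quantile(all_obj_fct, quantile)
    std_cond = all_obj_fct > best_objfct * (1 - std)
    tooclose_cond = (all_obj_fct < best_objfct * (1 - rmv_near_max)) | (all_obj_fct == best_objfct)
    keep = np.where(quantile_cond & std_cond & tooclose_cond)[0]

    eff_params = np.column_stack((all_params[keep], all_obj_fct[keep]))
    # Normalise, weight the objective column, then group near-duplicate sets with DBSCAN.
    norm = (eff_params - eff_params.min(axis=0)) / (eff_params.max(axis=0) - eff_params.min(axis=0))
    norm[:, -1] *= objective_weight
    labels = DBSCAN(eps=eps).fit(norm).labels_
    print("groups found:", len(set(labels)) - (1 if -1 in labels else 0))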
@@ -2800,6 +2832,35 @@ class Optimisation(wx.Frame):
             return None
         cur_fracts = curBasin.get_volume_fractions(interval=intervals)
         return cur_fracts
+
+    def _get_flow_fractions(self, idLauncher:int=0, stationOut:str="",
+                            intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]) -> dict[list[str], list[float]]:
+
+        curCatch:Catchment = self.myCases[idLauncher].refCatchment
+        cur_key = curCatch.get_key_catchmentDict(stationOut)
+        curBasin: SubBasin = curCatch.catchmentDict[cur_key]
+        if type(curBasin) != SubBasin:
+            logging.warning("The current module is not a SubBasin object!")
+            return None
+        cur_fracts = curBasin.get_flow_fractions(interval=intervals, summary="mean")
+        return cur_fracts
+
+
+    def _get_punctual_reservoir_fractions(self, eval_date:datetime.datetime,
+                                          idLauncher:int=0, stationOut:str="") -> dict[list[str], list[float]]:
+
+        curCatch:Catchment = self.myCases[idLauncher].refCatchment
+        cur_key = curCatch.get_key_catchmentDict(stationOut)
+        curBasin: SubBasin = curCatch.catchmentDict[cur_key]
+        if type(curBasin) != SubBasin:
+            logging.warning("The current module is not a SubBasin object!")
+            return None
+        linked_params = mc.MODELS_VAR[curBasin.model].get_all_linked_params()
+        i_params = self._get_key_from_type_all_parameters(list(linked_params.values()))
+        max_params = {var_name: self.myParams[i_params[param_id]]["value"] for var_name, param_id in linked_params.items()}
+        cur_fracts = curBasin.get_iv_fractions_one_date(max_params=max_params, eval_date=eval_date)
+        return cur_fracts
+
 
     # FIXME : to improve and generalise
     def _get_max_runoff(self, idLauncher:int=0, stationOut:str="",
@@ -2855,7 +2916,6 @@ class Optimisation(wx.Frame):
         for i in range(nbInlets):
             names.append("TimeDelay "+inletsNames[i])
 
-
         return names
 
     # Plot the equifinalty test for each station
@@ -2938,6 +2998,63 @@ class Optimisation(wx.Frame):
 
         plt.show()
 
+    # Plot the equifinalty test for each station
+    def plot_model_analysis(self, event, idLauncher:int=0):
+
+        physical_properties = ["%q_of", "%q_if", "%q_bf"]
+        # physical_properties_vol = [el+" volume" for el in physical_properties]
+        colors_properties = ["b", "g", "k"]
+        y_label = "Nash"
+
+        if self.myStations==[]:
+            self.set_compare_stations(idLauncher=idLauncher)
+        sortJct = self.myStations
+
+        for iOpti in range(len(sortJct)):
+            stationOut = sortJct[iOpti]
+            filename = os.path.join(self.workingDir, stationOut+"_tests.xlsx")
+            if os.path.isfile(filename):
+                df = pd.read_excel(filename, sheet_name=stationOut)
+                # Plot the physical properties
+                fig, ax = plt.subplots()
+                for cur_prop, cur_color in zip(physical_properties, colors_properties):
+                    cur_columns = [col for col in df.columns if cur_prop in col.replace(" ", "")]
+                    if cur_columns != []:
+                        corr_prop = cur_columns[0]
+                        ax.scatter(df.loc[:,corr_prop], df.loc[:,y_label], s=0.5, c=cur_color,
+                                   marker='o', label=cur_prop, alpha=0.4)
+                ax.set_xlabel("% of the rain [-]")
+                ax.set_ylabel(y_label+" [-]")
+                ax.set_title("Proportion of rain : "+stationOut)
+                ax.legend()
+                fig.savefig(os.path.join(self.workingDir, "Equifinality_physical_prop_"+stationOut+".png"))
+                # Plot the Probability of exceedance
+                cur_color = colors_properties[0]
+                x_label = "P. of exceedance"
+                fig, ax = plt.subplots()
+                if x_label in df.columns:
+                    ax.scatter(df.loc[:,x_label], df.loc[:,y_label], s=0.5, c=cur_color, marker='o', label=x_label)
+                    ax.set_xlabel(x_label +" [-]")
+                    ax.set_ylabel(y_label+" [-]")
+                    ax.set_title("Probability of Q_sim > Q_meas : "+stationOut)
+                    ax.legend()
+                    fig.savefig(os.path.join(self.workingDir, "Equifinality_prob_excess_"+stationOut+".png"))
+                # Plot Q_sim/Q_max
+                x_label = "Qmax_simul/Q_max_measure"
+                fig, ax = plt.subplots()
+                if x_label in df.columns:
+                    ax.scatter(df.loc[:,x_label], df.loc[:,y_label], s=0.5, c=cur_color, marker='o', label=x_label)
+                    ax.set_xlabel(x_label +" [-]")
+                    ax.set_ylabel(y_label+" [-]")
+                    ax.set_title("Peak analysis : "+stationOut)
+                    ax.legend()
+                    fig.savefig(os.path.join(self.workingDir, "Equifinality_peaks_ratio_"+stationOut+".png"))
+
+            else:
+                logging.error("The file "+filename+" does not exist!")
+
+        plt.show()
+
 
     def add_Case(self, idLauncher:int=0):
 
@@ -3093,6 +3210,169 @@ class Optimisation(wx.Frame):
         tmpWolf = None
 
 
+    # FIXME : this function has been dashed off -> functionnal but not well written!!
+    # TODO : to improve !!!!!!
+    def launch_models_propertie_with_Nash(self, event, idLauncher:int=0, idOpti:int=1, quantile_Nash:float=0.01, std_Nash:float=0.03, clustering_Nash:bool=True,
+                                          save_every:int=100, restart_from_file:bool=True):
+        """
+        Analyse the properties of the model and compare them with the Nash coefficient.
+
+        Args:
+            idLauncher (int, optional): The id of the launcher. Defaults to 0.
+
+        Returns:
+            None
+
+        Raises:
+            None
+        """
+        curCatch:Catchment = self.myCases[idLauncher].refCatchment
+
+        onlyOwnSub = self.optiParam.get_param("Semi-Distributed", "Own_SubBasin")
+        if onlyOwnSub is None:
+            onlyOwnSub = False
+        doneList = []
+        previousLevel = 1
+        # Collect sort and save the compare stations
+        self.set_compare_stations(idLauncher=idLauncher)
+        sortJct = self.myStations
+        # Get the initial number of intervals
+        # -> these can evolve according to the measurement available at each station
+        is_ok = self._save_opti_intervals()
+        all_intervals = self.all_intervals
+        # Activate the writing of the internal variables
+        curCatch.activate_all_internal_variables()
+        # Prepare the Excel writer
+        writer_tot = pd.ExcelWriter(os.path.join(self.workingDir, "all_best_tests.xlsx"), engine = 'xlsxwriter')
+
+        for iOpti in range(len(sortJct)):
+            stationOut = sortJct[iOpti]
+            logging.info("==================")
+            logging.info("Station : "+stationOut)
+            # Build the current compare.txt file and replace all nan values by 0.0
+            self.save_current_compare_file(stationOut=stationOut)
+            # Save the name of the station that will be the output
+            curCatch.define_station_out(stationOut)
+            # Activate all the useful subs and write it in the param file
+            curCatch.activate_usefulSubs(blockJunction=doneList, onlyItself=onlyOwnSub)
+            # Select correct calibration intervals -> remove the intervals with NaN
+            cur_intervals = self.select_opti_intervals(all_intervals=all_intervals, stationOut=stationOut, filter_nan=True)
+            self.save_opti_dates_to_file(cur_intervals)
+            # Rename the result file
+            self.optiParam.change_param("Optimizer", "fname", stationOut)
+            self.optiParam.SavetoFile(None)
+            self.optiParam.Reload(None)
+            self.update_myParams(idLauncher)
+            # Prepare the paramPy dictionnary before calibration
+            self.prepare_calibration_timeDelay(stationOut=stationOut)
+            # Reload the useful modules
+            self.reload_hydro(idCompar=0, fromStation=stationOut, lastLevel=previousLevel, updateAll=True)
+            ## =======
+            ## Init
+            ## =======
+            self.init_optimizer(idOpti)
+            self.associate_ptr(None, idOpti=idOpti)
+            # Get the best parameters to test
+            all_params = self.get_best_params(stationOut=stationOut, quantile=quantile_Nash, std=std_Nash, rmv_near_max=1e-4, apply_clustering=clustering_Nash)
+            ## =======
+            ## Compute
+            ## =======
+            all_frac = []
+            # Check if the excel file already exists and load it to check if some parameters have already been tested
+            if restart_from_file:
+                all_frac, all_params = self._reload_model_analysis(stationOut=stationOut, all_params=all_params)
+            # Get param names
+            names = self.get_param_names(idLauncher=idLauncher, stationOut=stationOut)
+            logging.info("The number of sets of parameters to test are : "+str(len(all_params)))
+            for i in tqdm(range(len(all_params))):
+                cur_p = all_params[i, :-1]
+                cur_obj = all_params[i, -1]
+                cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
+                print("cur_obj : ", cur_obj, " ; cur_obj2 : ", cur_obj2)
+                if cur_obj != cur_obj2:
+                    logging.error("The objective function is not the same as the one computed by the model!")
+                    logging.error("cur_obj : "+str(cur_obj)+" ; cur_obj2 : "+str(cur_obj2))
+                    # assert cur_obj == cur_obj2, "The objective function is not the same as the one computed by the model!"
+                self.write_mesh_results_optimizer(idOpti=idOpti)
+                # Save all the variables/evaluations desired
+                frac_flow_dict = self._get_flow_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
+                init_iv = self._get_punctual_reservoir_fractions(eval_date=cur_intervals[0][0], idLauncher=idLauncher, stationOut=stationOut)
+                p_excess = self._get_exceedance(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
+                max_sim_obs = self._get_ratio_max_sim_obs(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
+                # Extract the time delays
+                all_timeDelays = curCatch.get_timeDelays_inlets(ref=stationOut)
+                all_timeDelays_str = {key : str(datetime.timedelta(seconds=all_timeDelays[key])) for key in all_timeDelays}
+                cur_timeDelays = list(all_timeDelays_str.values())
+                # Concatenate all the informations
+                cur_all_frac = (list(cur_p)
+                                + cur_timeDelays
+                                + list(frac_flow_dict.values())
+                                + list(init_iv.values())
+                                + [p_excess, max_sim_obs, cur_obj])
+                all_frac.append(cur_all_frac)
+                # Periodically save the evaluations in case of trouble
+                if (i + 1) % save_every == 0:
+                    # Save the evaluations
+                    var_names = names \
+                                + list(all_timeDelays_str.keys()) \
+                                + list(frac_flow_dict.keys()) \
+                                + list(init_iv.keys()) \
+                                + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
+                    cur_df = pd.DataFrame(all_frac, columns=var_names)
+                    # write first the tempory results for each station
+                    writer_stat = pd.ExcelWriter(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
+                    cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
+                    writer_stat.sheets[stationOut].autofit()
+                    writer_stat.close()
+
+            # Save the evaluations
+            var_names = names \
+                        + list(all_timeDelays_str.keys()) \
+                        + list(frac_flow_dict.keys()) \
+                        + list(init_iv.keys()) \
+                        + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
+            cur_df = pd.DataFrame(all_frac, columns=var_names)
+            # write first the tempory results for each station
+            writer_stat = pd.ExcelWriter(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
+            cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
+            writer_stat.sheets[stationOut].autofit()
+            writer_stat.close()
+            # write now the informations for all the stations in the same excel file
+            cur_df.to_excel(writer_tot, sheet_name=stationOut, columns=var_names)
+            writer_tot.sheets[stationOut].autofit()
+
+            ## =======
+            ## =======
+            # Collect the best parameters and their objective function(s)
+            best_params = self.apply_optim(None)
+            # Simulation with the best parameters
+            self.compute_distributed_hydro_model()
+            # Update myHydro of all effective subbasins to get the best configuration upstream
+            curCatch.read_hydro_eff_subBasin()
+            # Update timeDelays according to time wolf_array
+            self.apply_timeDelay_dist(idOpti=idOpti, idLauncher=idLauncher, junctionKey=stationOut)
+            # Update the outflows
+            curCatch.update_hydro(idCompar=0)
+
+            # All upstream elements of a reference will be fixed
+            doneList.append(stationOut)
+            previousLevel = curCatch.levelOut
+
+        writer_tot.close()
+        logging.info("The equifinality test is finished!")
+
+    # FIXME : it might be better to pass the myParams to the CaseOpti object instead to allow parallelisation
+    def _build_type_to_key_index(self) -> dict[int, int]:
+        return {param["type"]: i for i, param in self.myParams.items()}
+
+    def _get_key_from_type_all_parameters(self, list_type_param: list[int]) -> dict[int | None]:
+        type_to_key = self._build_type_to_key_index()
+        return {cur_key: type_to_key.get(cur_key) for cur_key in list_type_param}
+
+    def _get_key_from_type_parameter(self, type_param:int) -> int:
+        return next((i for i, param in self.myParams.items() if param["type"] == type_param), None)
+
+
     def make_nd_array(self, c_pointer, shape, dtype=np.float64, order='C', own_data=True,readonly=False):
         arr_size = np.prod(shape[:]) * np.dtype(dtype).itemsize
 
@@ -3109,3 +3389,33 @@ class Optimisation(wx.Frame):
             return arr.copy()
         else:
             return arr
+
+    def _reload_model_analysis(self, stationOut:str, all_params:np.ndarray):
+        """
+        Reload the model analysis for a given station.
+
+        Args:
+            stationOut (str): The name of the station.
+            all_params (np.ndarray): The parameters to be tested.
+
+        Returns:
+            None
+
+        Raises:
+            None
+        """
+        # Check if the excel file already exists and load it to check if some parameters have already been tested
+        filename = os.path.join(self.workingDir, stationOut+"_tests.xlsx")
+        # just_params = all_params[:, :-1]
+        nb_params = np.shape(all_params)[1] - 1
+        if os.path.isfile(filename):
+            df = pd.read_excel(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), sheet_name=stationOut)
+            # Extract all the values of the dataframe in a list
+            all_data_tested = df.iloc[:, 1:].values.tolist()
+            # Extract all the values of the dataframe in a numpy array
+            all_params_tested = df.iloc[:, 1:nb_params+1].values
+            # Remove the parameters that have already been tested
+            new_params = np.array([el for el in all_params if ~np.any(np.all(np.isclose(all_params_tested, el[:-1], atol=1e-6), axis=1))])
+            return all_data_tested, new_params
+
+        return [], all_params