wolfhece 2.2.38__py3-none-any.whl → 2.2.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. wolfhece/Coordinates_operations.py +5 -0
  2. wolfhece/GraphNotebook.py +72 -1
  3. wolfhece/GraphProfile.py +1 -1
  4. wolfhece/MulticriteriAnalysis.py +1579 -0
  5. wolfhece/PandasGrid.py +62 -1
  6. wolfhece/PyCrosssections.py +194 -43
  7. wolfhece/PyDraw.py +891 -73
  8. wolfhece/PyGui.py +913 -72
  9. wolfhece/PyGuiHydrology.py +528 -74
  10. wolfhece/PyPalette.py +26 -4
  11. wolfhece/PyParams.py +33 -0
  12. wolfhece/PyPictures.py +2 -2
  13. wolfhece/PyVertex.py +25 -0
  14. wolfhece/PyVertexvectors.py +94 -28
  15. wolfhece/PyWMS.py +52 -36
  16. wolfhece/acceptability/acceptability.py +15 -8
  17. wolfhece/acceptability/acceptability_gui.py +507 -360
  18. wolfhece/acceptability/func.py +80 -183
  19. wolfhece/apps/version.py +1 -1
  20. wolfhece/compare_series.py +480 -0
  21. wolfhece/drawing_obj.py +12 -1
  22. wolfhece/hydrology/Catchment.py +228 -162
  23. wolfhece/hydrology/Internal_variables.py +43 -2
  24. wolfhece/hydrology/Models_characteristics.py +69 -67
  25. wolfhece/hydrology/Optimisation.py +893 -182
  26. wolfhece/hydrology/PyWatershed.py +267 -165
  27. wolfhece/hydrology/SubBasin.py +185 -140
  28. wolfhece/hydrology/cst_exchanges.py +76 -1
  29. wolfhece/hydrology/forcedexchanges.py +413 -49
  30. wolfhece/hydrology/read.py +65 -5
  31. wolfhece/hydrometry/kiwis.py +14 -7
  32. wolfhece/insyde_be/INBE_func.py +746 -0
  33. wolfhece/insyde_be/INBE_gui.py +1776 -0
  34. wolfhece/insyde_be/__init__.py +3 -0
  35. wolfhece/interpolating_raster.py +366 -0
  36. wolfhece/irm_alaro.py +1457 -0
  37. wolfhece/irm_qdf.py +889 -57
  38. wolfhece/lazviewer/laz_viewer.py +4 -1
  39. wolfhece/lifewatch.py +6 -3
  40. wolfhece/picc.py +124 -8
  41. wolfhece/pyLandUseFlanders.py +146 -0
  42. wolfhece/pydownloader.py +35 -1
  43. wolfhece/pywalous.py +225 -31
  44. wolfhece/toolshydrology_dll.py +149 -0
  45. wolfhece/wolf_array.py +63 -25
  46. {wolfhece-2.2.38.dist-info → wolfhece-2.2.40.dist-info}/METADATA +3 -1
  47. {wolfhece-2.2.38.dist-info → wolfhece-2.2.40.dist-info}/RECORD +50 -41
  48. {wolfhece-2.2.38.dist-info → wolfhece-2.2.40.dist-info}/WHEEL +0 -0
  49. {wolfhece-2.2.38.dist-info → wolfhece-2.2.40.dist-info}/entry_points.txt +0 -0
  50. {wolfhece-2.2.38.dist-info → wolfhece-2.2.40.dist-info}/top_level.txt +0 -0
@@ -33,6 +33,7 @@ from . import Models_characteristics as mc
33
33
  from . import Internal_variables as iv
34
34
  from ..PyTranslate import _
35
35
  import traceback
36
+ import gc
36
37
 
37
38
 
38
39
  # %% Constants
@@ -41,7 +42,6 @@ DLL_FILE_DEBUG = "WolfDll_debug.dll" # Name of the debug DLL
41
42
  DLL_FILE_TEST = "WolfDll_test.dll" # Name of the test DLL (to deactivate random numbers generation)
42
43
 
43
44
 
44
-
45
45
  # %% Classes
46
46
  class CaseOpti(GenMapManager):
47
47
 
@@ -69,25 +69,26 @@ class CaseOpti(GenMapManager):
69
69
 
70
70
  self.launcherDir = ""
71
71
 
72
- def read_param(self, dir, copyDefault=False, callback=None, workingDir=""):
72
+ def read_param(self, dir, copyDefault=False, callback=None, workingDir:Path = ""):
73
73
 
74
- self.launcherDir = dir
74
+ self.launcherDir = Path(dir)
75
+ workingDir = Path(workingDir)
75
76
 
76
77
  if not os.path.exists(self.launcherDir):
77
78
  try:
78
79
  os.mkdir(self.launcherDir)
79
- shutil.copyfile(workingDir+"launcher.param.default", os.path.join(self.launcherDir,"launcher.param"))
80
- shutil.copyfile(workingDir+"launcher.param.default", os.path.join(self.launcherDir,"launcher.param.default"))
80
+ shutil.copyfile(workingDir / "launcher.param.default", self.launcherDir / "launcher.param")
81
+ shutil.copyfile(workingDir / "launcher.param.default", self.launcherDir / "launcher.param.default")
81
82
  except OSError:
82
83
  print ("Creation of the directory %s failed" % self.launcherDir)
83
84
  else:
84
85
  print ("Successfully created the directory %s" % self.launcherDir)
85
86
 
86
87
  if copyDefault:
87
- shutil.copyfile(workingDir+"launcher.param.default", os.path.join(self.launcherDir,"launcher.param"))
88
- shutil.copyfile(workingDir+"launcher.param.default", os.path.join(self.launcherDir,"launcher.param.default"))
88
+ shutil.copyfile(workingDir / "launcher.param.default", self.launcherDir / "launcher.param")
89
+ shutil.copyfile(workingDir / "launcher.param.default", self.launcherDir / "launcher.param.default")
89
90
 
90
- self.launcherParam = Wolf_Param(to_read=True, filename=os.path.join(self.launcherDir,"launcher.param"),title="launcher", toShow=False)
91
+ self.launcherParam = Wolf_Param(to_read=True, filename=self.launcherDir / "launcher.param",title="launcher", toShow=False)
91
92
 
92
93
 
93
94
  def show_launcherParam(self, event):
@@ -151,7 +152,6 @@ class Optimisation(wx.Frame):
151
152
  self.myParamsPy = {}
152
153
  self.nbParams = 0
153
154
 
154
- #self.pathDll = Path(os.path.dirname(__file__)).parent
155
155
  # point to the wolf_libs package directory
156
156
  self.pathDll = wolf_libs.__path__[0]
157
157
 
@@ -236,12 +236,11 @@ class Optimisation(wx.Frame):
236
236
  plotEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Plot equifinality with Nash')
237
237
  self.Bind(wx.EVT_MENU, self.plot_equifinality, plotEquiFinClick)
238
238
  testEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Models analysis with Nash')
239
- self.Bind(wx.EVT_MENU, self.launch_models_propertie_with_Nash, testEquiFinClick)
239
+ self.Bind(wx.EVT_MENU, self.launch_models_properties_with_Nash, testEquiFinClick)
240
240
  plotEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Plot analysis with Nash')
241
241
  self.Bind(wx.EVT_MENU, self.plot_model_analysis, plotEquiFinClick)
242
242
 
243
243
 
244
-
245
244
  # Creation of the Lauch Menu
246
245
  launchMenu = wx.Menu()
247
246
  normalLaunchClick = launchMenu.Append(wx.ID_ANY, '1 Basin')
@@ -284,50 +283,54 @@ class Optimisation(wx.Frame):
284
283
 
285
284
 
286
285
  def new(self, event):
286
+ """ Create a new optimisation directory and files. """
287
287
 
288
288
  launcherDir = "simul_1"
289
289
 
290
290
  # Selection of the working directory
291
291
  idir=wx.DirDialog(None,"Choose an optimisation directory")
292
292
  if idir.ShowModal() == wx.ID_CANCEL:
293
- print("Optimisation cancelled!")
293
+ logging.info("Optimisation cancelled!")
294
294
  idir.Destroy()
295
+ return
295
296
 
296
- self.workingDir = idir.GetPath()+"\\"
297
- launcherDir = os.path.join(self.workingDir,launcherDir)
297
+ self.workingDir = Path(idir.GetPath())
298
+ launcherDir = self.workingDir / launcherDir
298
299
  idir.Destroy()
299
300
 
300
301
  # Launch the Fortran code a first time to generate the default files
301
- self.default_files(None)
302
+ self.default_files()
302
303
 
303
304
  # Copy and reading of the optiParam file
304
- shutil.copyfile(self.workingDir+"test_opti.param.default", os.path.join(self.workingDir,"test_opti.param"))
305
- shutil.copyfile(self.workingDir+"sa.param.default", os.path.join(self.workingDir,"sa.param"))
306
- shutil.copyfile(self.workingDir+"compare.how.param.default", os.path.join(self.workingDir,"compare.how.param"))
307
- if not os.path.exists(launcherDir):
305
+ shutil.copyfile(self.workingDir / "test_opti.param.default", self.workingDir / "test_opti.param")
306
+ shutil.copyfile(self.workingDir / "sa.param.default", self.workingDir / "sa.param")
307
+ shutil.copyfile(self.workingDir / "compare.how.param.default", self.workingDir / "compare.how.param")
308
+
309
+ if not launcherDir.exists():
308
310
  try:
309
- os.mkdir(launcherDir)
311
+ launcherDir.mkdir(parents=True, exist_ok=True)
310
312
  except OSError:
311
313
  print ("Creation of the directory %s failed" % launcherDir)
312
314
  else:
313
315
  print ("Successfully created the directory %s" % launcherDir)
314
- shutil.copyfile(self.workingDir+"launcher.param.default", os.path.join(launcherDir,"launcher.param"))
315
- shutil.copyfile(self.workingDir+"launcher.param.default", os.path.join(launcherDir,"launcher.param.default"))
316
+ shutil.copyfile(self.workingDir / "launcher.param.default", launcherDir /"launcher.param")
317
+ shutil.copyfile(self.workingDir / "launcher.param.default", launcherDir / "launcher.param.default")
316
318
 
317
319
 
318
320
  # Read the main opti file
319
- self.optiParam = Wolf_Param(to_read=True, filename=os.path.join(self.workingDir,"test_opti.param"),title="test_opti",toShow=False)
321
+ self.optiParam = Wolf_Param(to_read=True, filename= self.workingDir / "test_opti.param", title = "test_opti", toShow = False)
320
322
  # # Update all the paths and read all simul
321
323
  # self.init_dir_in_params()
322
324
  # Read all the param files and init the Case objects and then read the param files associated
323
325
  newCase = CaseOpti()
324
- newCase.read_param(launcherDir, copyDefault=True, callback=self.update_parameters_launcher, workingDir=self.workingDir)
326
+ newCase.read_param(launcherDir, copyDefault=True, callback=self.update_parameters_launcher, workingDir = self.workingDir)
325
327
  self.myCases.append(newCase)
328
+
326
329
  # Update all the paths and read all simul
327
330
  self.init_dir_in_params()
328
331
 
329
- self.comparHowParam = Wolf_Param(to_read=True,filename=os.path.join(self.workingDir,"compare.how.param"),title="compare.how",toShow=False)
330
- self.saParam = Wolf_Param(to_read=True,filename=os.path.join(self.workingDir,"sa.param"), title="sa",toShow=False)
332
+ self.comparHowParam = Wolf_Param(to_read=True,filename= self.workingDir / "compare.how.param",title="compare.how",toShow=False)
333
+ self.saParam = Wolf_Param(to_read=True,filename= self.workingDir / "sa.param", title="sa",toShow=False)
331
334
  self.saParam._callback = self.update_parameters_SA
332
335
  # initialise all param files according to the reference characteristics
333
336
  self.init_with_reference()
@@ -347,13 +350,13 @@ class Optimisation(wx.Frame):
347
350
  curDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
348
351
  isOk, curDir = check_path(curDir, prefix=self.workingDir, applyCWD=True)
349
352
  if isOk<0:
350
- print("ERROR : in path of launcherDir")
351
- newCase.mydro = HydrologyModel(dir=curDir)
353
+ logging.error("ERROR : in path of launcherDir")
354
+ newCase.mydro = HydrologyModel(directory=curDir)
352
355
  newCase.mydro.Hide()
353
356
  self.Bind(wx.EVT_MENU, newCase.show_mydro, guiHydroCase)
354
357
  curCase = paramMenu.Append(newId, curName, caseMenu)
355
358
  except:
356
- print("ERROR: launch again the app and apply 'load' files.")
359
+ logging.error("ERROR: launch again the app and apply 'load' files.")
357
360
 
358
361
 
359
362
 
@@ -368,7 +371,7 @@ class Optimisation(wx.Frame):
368
371
  if workingDir=="":
369
372
  idir=wx.FileDialog(None,"Choose an optimatimisation file",wildcard='Fichiers param (*.param)|*.param')
370
373
  if idir.ShowModal() == wx.ID_CANCEL:
371
- print("Post process cancelled!")
374
+ logging.info(_("Post process cancelled!"))
372
375
  idir.Destroy()
373
376
  return
374
377
  # sys.exit()
@@ -405,7 +408,7 @@ class Optimisation(wx.Frame):
405
408
  launcherDir = self.optiParam.get_param("Cases","dir_"+str(i+1))
406
409
  isOk, launcherDir = check_path(launcherDir, prefix=self.workingDir, applyCWD=True)
407
410
  if isOk<0:
408
- print("ERROR : in path of launcherDir")
411
+ logging.error("ERROR : in path of launcherDir")
409
412
  newCase.read_param(launcherDir, copyDefault=False, callback=self.update_parameters_launcher)
410
413
  # FIXME TO CHANGE when seperation with the GUI
411
414
  if self.wx_exists:
@@ -422,13 +425,13 @@ class Optimisation(wx.Frame):
422
425
  refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
423
426
  isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
424
427
  if isOk<0:
425
- print("ERROR : in path of launcherDir")
426
- newCase.mydro = HydrologyModel(dir=refDir)
428
+ logging.error("ERROR : in path of launcherDir")
429
+ newCase.mydro = HydrologyModel(directory=refDir)
427
430
  newCase.mydro.Hide()
428
431
  self.Bind(wx.EVT_MENU, newCase.show_mydro, guiHydroCase)
429
432
  curCase = paramMenu.Append(newId, curName, caseMenu)
430
433
  else:
431
- print("WARNING : this scenario was not implemented yet. This might induce an error!")
434
+ logging.Warning(_("WARNING : this scenario was not implemented yet. This might induce an error!"))
432
435
  # iItem =
433
436
  curCase = paramMenu.Replace(iItem)
434
437
  self.Bind(wx.EVT_MENU, newCase.show_launcherParam, curCase)
@@ -436,7 +439,7 @@ class Optimisation(wx.Frame):
436
439
  else:
437
440
  refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
438
441
  isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
439
- newCase.mydro = HydrologyModel(dir=refDir)
442
+ newCase.mydro = HydrologyModel(directory=refDir)
440
443
  self.myCases.append(newCase)
441
444
 
442
445
 
@@ -705,7 +708,7 @@ class Optimisation(wx.Frame):
705
708
  # Selection of the working directory
706
709
  idir=wx.FileDialog(None,"Choose a reference file",wildcard='Fichiers post-processing (*.postPro)|*.postPro',defaultDir=defaultPath)
707
710
  if idir.ShowModal() == wx.ID_CANCEL:
708
- print("Post process cancelled!")
711
+ logging.info(_("Post process cancelled!"))
709
712
  idir.Destroy()
710
713
 
711
714
  refFileName = idir.GetPath()
@@ -752,7 +755,7 @@ class Optimisation(wx.Frame):
752
755
  if self.wx_exists:
753
756
  idir=wx.FileDialog(None,"Choose a reference file",wildcard='Fichiers post-processing (*.postPro)|*.postPro',defaultDir=defaultPath)
754
757
  if idir.ShowModal() == wx.ID_CANCEL:
755
- print("Post process cancelled!")
758
+ logging.info(_("Post process cancelled!"))
756
759
  idir.Destroy()
757
760
  refFileName = idir.GetPath()
758
761
  refDir = idir.GetDirectory()
@@ -813,7 +816,7 @@ class Optimisation(wx.Frame):
813
816
 
814
817
  def checkIntervals(self):
815
818
 
816
- print("So far do nothing to check intervals!")
819
+ logging.info(_("So far do nothing to check intervals!"))
817
820
  # self.comparHowParam[]
818
821
 
819
822
 
@@ -938,38 +941,53 @@ class Optimisation(wx.Frame):
938
941
  self.Fit()
939
942
 
940
943
 
941
- def load_dll(self, path, fileName):
942
- libpath = os.path.join(path,'libs',fileName)
944
+ def load_dll(self, path:str, fileName:str = "WolfDLL.dll"):
945
+ """ Load the Fortran DLL for optimization.
946
+
947
+ :param path: The directory where the DLL is located.
948
+ :param fileName: The name of the DLL file to load.
949
+ """
950
+
951
+ libpath = os.path.join(path, fileName)
952
+
953
+ if not Path(libpath).exists():
954
+ # try libs subdirectory
955
+ libpath = os.path.join(path, "libs", fileName)
956
+ if not Path(libpath).exists():
957
+ logging.error(f"Library not found: {libpath}")
958
+ return
959
+
943
960
  try:
944
961
  self.dllFortran = ct.CDLL(libpath)
945
962
  except:
946
- print('Erreur de chargement de la librairie WolfDLL.dll')
963
+ logging.error(_('Error during loading of WolfDLL.dll -- Please check if all Fortran DLL dependencies are met !'))
947
964
 
948
- def default_files(self, event):
965
+ def default_files(self):
966
+ """ Create the default optimizer files in the working directory. """
949
967
 
950
- pathPtr = self.workingDir.encode('ansi')
968
+ pathPtr = str(self.workingDir).encode('ansi')
951
969
  fileNamePtr = "test_opti.param".encode('ansi')
952
970
  self.dllFortran.new_optimizer_files_py.restype = ct.c_int
953
971
  self.dllFortran.new_optimizer_files_py.argtypes = [ct.c_char_p, ct.c_char_p, ct.c_int, ct.c_int]
954
972
 
955
- print("Launch a Fortran procedure")
973
+ logging.info(_("Launch a Fortran procedure"))
956
974
  id = self.dllFortran.new_optimizer_files_py(pathPtr,fileNamePtr,ct.c_int(len(pathPtr)),ct.c_int(len(fileNamePtr)))
957
975
 
958
- print("id optimizer = ", id)
976
+ logging.info(_("id optimizer = "), id)
959
977
 
960
- print("End of Fortran procedure")
978
+ logging.info(_("End of Fortran procedure"))
961
979
 
962
980
  def compute_optimizer(self, idOpti=1):
963
981
 
964
982
  self.dllFortran.compute_optimizer_py.restype = ct.c_int
965
983
  self.dllFortran.compute_optimizer_py.argtypes = [ct.POINTER(ct.c_int)]
966
984
 
967
- print("Launch a Fortran procedure")
985
+ logging.info(_("Launch a Fortran procedure"))
968
986
  isOk = self.dllFortran.compute_optimizer_py(ct.byref(ct.c_int(idOpti)))
969
- print("End of Fortran procedure")
987
+ logging.info(_("End of Fortran procedure"))
970
988
 
971
989
  if isOk!=0:
972
- print("ERROR: in the Fotran routine in the optimizer computation!")
990
+ logging.error("ERROR: in the Fotran routine in the optimizer computation!")
973
991
 
974
992
 
975
993
  def evaluate_model_optimizer(self, parameters:np.array, idOpti:int=1):
@@ -985,11 +1003,11 @@ class Optimisation(wx.Frame):
985
1003
  pointerDims = dims.ctypes.data_as(ct.POINTER(ct.c_int))
986
1004
  pointer_p = p.ctypes.data_as(ct.POINTER(ct.c_double))
987
1005
 
988
- print("Launch a Fortran procedure")
1006
+ logging.info(_("Launch a Fortran procedure"))
989
1007
  obj_fct = self.dllFortran.evaluate_model_optimizer_py(ct.byref(ct.c_int(idOpti)),
990
1008
  pointerDims,
991
1009
  pointer_p)
992
- print("End of Fortran procedure")
1010
+ logging.info(_("End of Fortran procedure"))
993
1011
 
994
1012
  return obj_fct
995
1013
 
@@ -1000,17 +1018,17 @@ class Optimisation(wx.Frame):
1000
1018
  self.dllFortran.write_mesh_results_optimizer_py.argtypes = [ct.POINTER(ct.c_int)]
1001
1019
 
1002
1020
 
1003
- print("Launch a Fortran procedure")
1021
+ logging.info(_("Launch a Fortran procedure"))
1004
1022
  isOk = self.dllFortran.write_mesh_results_optimizer_py(ct.byref(ct.c_int(idOpti)))
1005
- print("End of Fortran procedure")
1023
+ logging.info(_("End of Fortran procedure"))
1006
1024
 
1007
1025
  if isOk!=0:
1008
- print("ERROR: in the Fotran routine in the optimizer computation!")
1026
+ logging.error("ERROR: in the Fotran routine in the optimizer computation!")
1009
1027
 
1010
1028
 
1011
1029
  def init_optimizer(self, idForced=-1):
1012
1030
 
1013
- pathPtr = self.workingDir.encode('ansi')
1031
+ pathPtr = str(self.workingDir).encode('ansi')
1014
1032
  fileNamePtr = "test_opti.param".encode('ansi')
1015
1033
  self.dllFortran.init_optimizer_py.restype = ct.c_int
1016
1034
  self.dllFortran.init_optimizer_py.argtypes = [ct.c_char_p, ct.c_char_p, ct.c_int, ct.c_int,ct.POINTER(ct.c_int)]
@@ -1019,17 +1037,17 @@ class Optimisation(wx.Frame):
1019
1037
  opt_id = None
1020
1038
  else:
1021
1039
  opt_id = ct.byref(ct.c_int(idForced))
1022
- print("Launch a Fortran procedure")
1040
+ logging.info(_("Launch a Fortran procedure"))
1023
1041
  id = self.dllFortran.init_optimizer_py(pathPtr,fileNamePtr,ct.c_int(len(pathPtr)),ct.c_int(len(fileNamePtr)), opt_id)
1024
1042
 
1025
- print("id optimizer = ", id)
1043
+ logging.info(_("id optimizer = "), id)
1026
1044
 
1027
- print("End of Fortran procedure")
1045
+ logging.info(_("End of Fortran procedure"))
1028
1046
 
1029
1047
 
1030
1048
  def init_optimizer_again(self, event, idForced=1):
1031
1049
 
1032
- pathPtr = self.workingDir.encode('ansi')
1050
+ pathPtr = str(self.workingDir).encode('ansi')
1033
1051
  fileNamePtr = "test_opti.param".encode('ansi')
1034
1052
  self.dllFortran.init_optimizer_py.restype = ct.c_int
1035
1053
  self.dllFortran.init_optimizer_py.argtypes = [ct.c_char_p, ct.c_char_p, ct.c_int, ct.c_int,ct.POINTER(ct.c_int)]
@@ -1038,12 +1056,12 @@ class Optimisation(wx.Frame):
1038
1056
  opt_id = None
1039
1057
  else:
1040
1058
  opt_id = ct.byref(ct.c_int(idForced))
1041
- print("Launch a Fortran procedure")
1059
+ logging.info(_("Launch a Fortran procedure"))
1042
1060
  id = self.dllFortran.init_optimizer_py(pathPtr,fileNamePtr,ct.c_int(len(pathPtr)),ct.c_int(len(fileNamePtr)), opt_id)
1043
1061
 
1044
- print("id optimizer = ", id)
1062
+ logging.info(_("id optimizer = "), id)
1045
1063
 
1046
- print("End of Fortran procedure")
1064
+ logging.info(_("End of Fortran procedure"))
1047
1065
 
1048
1066
 
1049
1067
 
@@ -1054,9 +1072,9 @@ class Optimisation(wx.Frame):
1054
1072
 
1055
1073
  pathPtr = self.myCases[idLauncher].refCatchment.workingDir.encode('ansi')
1056
1074
 
1057
- print("Compute distributed hydro model ...")
1075
+ logging.info(_("Compute distributed hydro model ..."))
1058
1076
  isOk = self.dllFortran.compute_dist_hydro_model_py(pathPtr, ct.c_int(len(pathPtr)))
1059
- print("End of distributed hydro model.")
1077
+ logging.info(_("End of distributed hydro model."))
1060
1078
 
1061
1079
 
1062
1080
  def compute0_distributed_hydro_model(self, event):
@@ -1066,16 +1084,16 @@ class Optimisation(wx.Frame):
1066
1084
 
1067
1085
  idir=wx.DirDialog(None,"Choose an hydrology directory")
1068
1086
  if idir.ShowModal() == wx.ID_CANCEL:
1069
- print("Hydro computation cancelled!")
1087
+ logging.info(_("Hydrology computation cancelled!"))
1070
1088
  idir.Destroy()
1071
1089
  return
1072
1090
  pathPtr = idir.GetPath().encode('ansi')
1073
1091
  idir.Destroy()
1074
1092
 
1075
1093
 
1076
- print("Compute distributed hydro model ...")
1094
+ logging.info(_("Compute distributed hydro model ..."))
1077
1095
  isOk = self.dllFortran.compute_dist_hydro_model_py(pathPtr, ct.c_int(len(pathPtr)))
1078
- print("End of distributed hydro model.")
1096
+ logging.info(_("End of distributed hydro model."))
1079
1097
 
1080
1098
 
1081
1099
 
@@ -1108,7 +1126,7 @@ class Optimisation(wx.Frame):
1108
1126
 
1109
1127
 
1110
1128
  def associate_callback_fct(self):
1111
- print("")
1129
+ logging.info(_("Associate callback function ..."))
1112
1130
 
1113
1131
 
1114
1132
  def associate_callback_fct_update(self, idOpti=1, idLauncher=0):
@@ -1133,7 +1151,7 @@ class Optimisation(wx.Frame):
1133
1151
  self.dllFortran.associate_callback_fct(ct.byref(ct.c_int(idOpti)),ct.byref(ct.c_int(idLauncher+1)),
1134
1152
  ct.c_int(cste.fptr_update),pointerDims,update_ptr)
1135
1153
 
1136
- print("End of update pointer association!")
1154
+ logging.info(_("End of update pointer association!"))
1137
1155
 
1138
1156
 
1139
1157
 
@@ -1158,7 +1176,7 @@ class Optimisation(wx.Frame):
1158
1176
  self.dllFortran.associate_callback_fct(ct.byref(ct.c_int(idOpti)),ct.byref(ct.c_int(idLauncher+1)),
1159
1177
  ct.c_int(cste.fptr_get_cvg),pointerDims,getcvg_ptr)
1160
1178
 
1161
- print("End of pointer association!")
1179
+ logging.info(_("End of pointer association!"))
1162
1180
 
1163
1181
 
1164
1182
  def associate_ptr_q_all(self, idOpti=1, idLauncher=0):
@@ -1192,6 +1210,31 @@ class Optimisation(wx.Frame):
1192
1210
  counter += 1
1193
1211
 
1194
1212
 
1213
+ def associate_ptr_iv_saved(self, idOpti=1, idLauncher=0):
1214
+ # nb of arguments in the dimensions vector (dims)
1215
+ ndims = 3
1216
+ # init of the dimensions vector
1217
+ dims = np.zeros((ndims,), dtype=ct.c_int, order='F')
1218
+ pointerDims = dims.ctypes.data_as(ct.POINTER(ct.c_int))
1219
+
1220
+ counter = 1
1221
+ for iSub in self.myCases[idLauncher].refCatchment.myEffSortSubBasins:
1222
+ # curSub = self.refCatchment.subBasinDict[iSub]
1223
+ mydict = self.myCases[idLauncher].refCatchment.dictIdConversion
1224
+ idIP= list(mydict.keys())[list(mydict.values()).index(iSub)]
1225
+ curSub = self.myCases[idLauncher].refCatchment.subBasinDict[idIP]
1226
+ dims[2] = counter
1227
+ dims[0] = len(self.myCases[idLauncher].refCatchment.time)
1228
+ # call of the Fortran function
1229
+ curSub.ptr_iv_saved = None
1230
+ curSub.ptr_iv_saved = self.dllFortran.get_cptr_py(ct.byref(ct.c_int(idOpti)),ct.byref(ct.c_int(idLauncher+1)),
1231
+ ct.c_int(cste.ptr_iv_saved), pointerDims)
1232
+ curSub.saved_iv = None
1233
+ curSub.saved_iv = self.make_nd_array(curSub.ptr_iv_saved, shape=(dims[0],dims[1]), dtype=ct.c_double, order='F', own_data=False)
1234
+
1235
+ counter += 1
1236
+
1237
+
1195
1238
  def associate_ptr_time_delays(self, idOpti=1, idLauncher=0):
1196
1239
  # nb of arguments in the dimensions vector (dims)
1197
1240
  ndims = 1
@@ -1226,7 +1269,7 @@ class Optimisation(wx.Frame):
1226
1269
  isOk = self.dllFortran.associate_ptr_py(ct.byref(ct.c_int(idOpti)),ct.byref(ct.c_int(idLauncher+1)), ct.c_int(cste.ptr_params),
1227
1270
  pointerDims, pointerParam)
1228
1271
 
1229
- print("End of param pointer association.")
1272
+ logging.info(_("End of parameter pointer association."))
1230
1273
 
1231
1274
 
1232
1275
 
@@ -1244,22 +1287,88 @@ class Optimisation(wx.Frame):
1244
1287
  isOk = self.dllFortran.associate_ptr_py(ct.byref(ct.c_int(idOpti)),ct.byref(ct.c_int(idLauncher+1)), ct.c_int(cste.ptr_opti_factors),
1245
1288
  pointerDims, ct.byref(self.optiFactor_F))
1246
1289
 
1247
- print("End of factor pointer association.")
1290
+ logging.info(_("End of factor pointer association."))
1291
+
1292
+
1293
+ def get_all_activated_iv(self, idOpti:int=1, idLauncher:int=0,
1294
+ iv_variables:tuple[np.ndarray, np.ndarray]=None)-> tuple[np.ndarray, np.ndarray]:
1295
+ def check_iv_variables(iv_ids, iv_values, expected_nb_iv):
1296
+ assert iv_ids.dtype == np.dtype(ct.c_int), "The vector of ids iv_ids dtype is not ct.c_int (float64)"
1297
+ assert iv_ids.flags['F_CONTIGUOUS'], "The vector of ids iv_ids is not Fortran-ordered (order='F')"
1298
+ assert iv_ids.shape[0] == expected_nb_iv, f"Expected {expected_nb_iv} ids, got {iv_ids.shape[0]}"
1299
+ assert iv_values.dtype == np.dtype(ct.c_double), "Array of i.v. dtype is not ct.c_double (float64)"
1300
+ assert iv_values.flags['F_CONTIGUOUS'], "Array of i.v. is not Fortran-ordered (order='F')"
1301
+ assert iv_values.shape[1] == expected_nb_iv, f"Expected {expected_nb_iv} values per id, got {iv_values.shape[1]}"
1302
+
1303
+ self.dllFortran.get_nb_activated_iv_py.restype = ct.c_int
1304
+ self.dllFortran.get_nb_activated_iv_py.argtypes = [ct.POINTER(ct.c_int),
1305
+ ct.POINTER(ct.c_int),
1306
+ ct.POINTER(ct.c_int)]
1307
+
1308
+ self.dllFortran.get_all_activated_iv_py.restype = ct.c_int
1309
+ self.dllFortran.get_all_activated_iv_py.argtypes = [ct.POINTER(ct.c_int),
1310
+ ct.POINTER(ct.c_int),
1311
+ ct.POINTER(ct.c_int),
1312
+ ct.POINTER(ct.c_int),
1313
+ ct.POINTER(ct.c_int),
1314
+ ct.POINTER(ct.c_double)]
1315
+
1316
+ nb_iv = ct.c_int()
1317
+ logging.info(_("Launch a Fortran procedure"))
1318
+ isOk = self.dllFortran.get_nb_activated_iv_py(ct.byref(ct.c_int(idOpti)),
1319
+ ct.byref(ct.c_int(idLauncher+1)),
1320
+ ct.byref(nb_iv))
1321
+ if isOk!=0:
1322
+ logging.error("Problem in the Fortran routine in get_nb_activated_iv_py!")
1323
+ return None, None
1324
+ if nb_iv.value == 0:
1325
+ logging.warning("No activated input variables found in the Fortran routine in get_nb_activated_iv_py!")
1326
+ return None, None
1327
+
1328
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
1329
+ nb_t = ct.c_int(len(curCatch.time))
1330
+ if iv_variables is not None:
1331
+ # If iv_variables is provided, use its shape to determine the number of intervals
1332
+ iv_ids, iv_values = iv_variables
1333
+ check_iv_variables(iv_ids, iv_values, nb_iv.value)
1334
+ else:
1335
+ iv_ids = np.zeros((nb_iv.value,), dtype=ct.c_int, order='F')
1336
+ iv_values = np.zeros((nb_t.value, nb_iv.value), dtype=ct.c_double, order='F')
1337
+ ptr_ids = iv_ids.ctypes.data_as(ct.POINTER(ct.c_int))
1338
+ ptr_values = iv_values.ctypes.data_as(ct.POINTER(ct.c_double))
1339
+ # FIXME: consider all effective sub-basin
1340
+ # TODO: generalise for when several effective subbasins
1341
+ id_sub_eff = ct.c_int(0)
1342
+
1343
+ isOk = self.dllFortran.get_all_activated_iv_py(ct.byref(ct.c_int(idOpti)),
1344
+ ct.byref(ct.c_int(idLauncher+1)),
1345
+ ct.byref(nb_t),
1346
+ ct.byref(nb_iv),
1347
+ ptr_ids,
1348
+ ptr_values,
1349
+ id_sub_eff)
1350
+ if isOk<0:
1351
+ logging.error("Problem in the Fortran routine in get_all_activated_iv_py!")
1352
+ return None, None
1353
+
1354
+ logging.info(_("End of Fortran procedure"))
1355
+
1356
+ return iv_ids, iv_values
1248
1357
 
1249
1358
 
1250
1359
  def init_distributed_hydro_model(self, event):
1251
1360
 
1252
- pathPtr = self.workingDir.encode('ansi')
1361
+ pathPtr = str(self.workingDir).encode('ansi')
1253
1362
  fileNamePtr = "test_opti.param".encode('ansi')
1254
1363
  self.dllFortran.init_dist_hydro_model_py.restype = ct.c_int
1255
1364
  self.dllFortran.init_dist_hydro_model_py.argtypes = []
1256
1365
 
1257
- print("Launch a Fortran procedure")
1366
+ logging.info(_("Launch a Fortran procedure"))
1258
1367
  id = self.dllFortran.init_dist_hydro_model_py()
1259
1368
 
1260
- print("id distributed_hydro_model = ", id)
1369
+ logging.info(_("id distributed_hydro_model = %d"), id)
1261
1370
 
1262
- print("End of Fortran procedure")
1371
+ logging.info(_("End of Fortran procedure"))
1263
1372
 
1264
1373
 
1265
1374
  def launch_lumped_optimisation(self, event, idOpti=1):
@@ -1273,8 +1382,8 @@ class Optimisation(wx.Frame):
1273
1382
  # Launch Fortran routine to compute optimisation and write the best results
1274
1383
  self.compute_optimizer(idOpti=idOpti)
1275
1384
 
1276
- print("Best parameters : ", self.curParams_vec_F)
1277
- print("Best Factor = ", self.optiFactor_F)
1385
+ logging.info(_("Best parameters : %s"), self.curParams_vec_F)
1386
+ logging.info(_("Best Factor = %s"), self.optiFactor_F)
1278
1387
 
1279
1388
  # Apply the best parameters
1280
1389
  self.apply_optim(None)
@@ -1390,30 +1499,29 @@ class Optimisation(wx.Frame):
1390
1499
  best_params_overall = best_params
1391
1500
  i_best_overal = cur_i
1392
1501
  # copy the optimisation results to save it on the disk
1393
- shutil.copyfile(os.path.join(self.workingDir, stationOut+".rpt.dat"),
1394
- os.path.join(self.workingDir, stationOut+"_"+str(cur_i+1)+".rpt.dat"))
1395
- shutil.copyfile(os.path.join(self.workingDir, stationOut+".rpt"),
1396
- os.path.join(self.workingDir, stationOut+"_"+str(cur_i+1)+".rpt"))
1502
+ shutil.copyfile(self.workingDir / (stationOut+".rpt.dat"),
1503
+ self.workingDir / (stationOut+"_"+str(cur_i+1)+".rpt.dat"))
1504
+ shutil.copyfile(self.workingDir / (stationOut+".rpt"),
1505
+ self.workingDir / (stationOut+"_"+str(cur_i+1)+".rpt"))
1397
1506
  cur_i += 1
1398
1507
  # Apply the best parameters overall attemps
1399
1508
  self.apply_optim(None,optim_params=best_params_overall)
1400
1509
  # Reset the init parameters
1401
1510
  self.reset_init_params(init_params)
1402
1511
  # copy the optimisation results to save it on the disk
1403
- shutil.copyfile(os.path.join(self.workingDir, stationOut+"_"+str(i_best_overal+1)+".rpt.dat"),
1404
- os.path.join(self.workingDir, stationOut+".rpt.dat"))
1405
- shutil.copyfile(os.path.join(self.workingDir, stationOut+"_"+str(i_best_overal+1)+".rpt"),
1406
- os.path.join(self.workingDir, stationOut+".rpt"))
1407
-
1512
+ shutil.copyfile(self.workingDir / (stationOut+"_"+str(i_best_overal+1)+".rpt.dat"),
1513
+ self.workingDir / (stationOut+".rpt.dat"))
1514
+ shutil.copyfile(self.workingDir/ (stationOut+"_"+str(i_best_overal+1)+".rpt"),
1515
+ self.workingDir / (stationOut+".rpt"))
1408
1516
 
1409
1517
  # Simulation with the best parameters
1410
1518
  self.compute_distributed_hydro_model()
1411
1519
  cur_p = best_params_overall[:-1]
1412
1520
  cur_obj = best_params_overall[-1]
1413
1521
  cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
1414
- print("cur_obj : ", cur_obj, " ; cur_obj2 : ", cur_obj2)
1522
+ logging.info(_("cur_obj : %s ; cur_obj2 : %s"), cur_obj, cur_obj2)
1415
1523
  if cur_obj != cur_obj2:
1416
- logging.error("The objective function is not the same as the one computed")
1524
+ logging.error(_("The objective function is not the same as the one computed"))
1417
1525
  # Update myHydro of all effective subbasins to get the best configuration upstream
1418
1526
  curCatch.read_hydro_eff_subBasin()
1419
1527
  # Update timeDelays according to time wolf_array
@@ -1441,7 +1549,7 @@ class Optimisation(wx.Frame):
1441
1549
  # Possibility to use the optimisation results enabled
1442
1550
  self.enable_MenuBar("Tools")
1443
1551
 
1444
- print("End of semi-distributed optimisation!")
1552
+ logging.info(_("End of semi-distributed optimisation!"))
1445
1553
 
1446
1554
 
1447
1555
  # TO DO : Change this function to Case -> to make it compatible with several cases.
@@ -1468,11 +1576,11 @@ class Optimisation(wx.Frame):
1468
1576
 
1469
1577
  isOk = self.myCases[0].refCatchment.update_hydro(idCompar, fromLevel=False)
1470
1578
  tf = time_mod.process_time()
1471
- print("Time in update_hydro() : ", tf-t0)
1472
- print("curParam = ", self.curParams_vec_F)
1473
- print("All timeDelays = ", self.myCases[0].refCatchment.get_all_timeDelay())
1579
+ logging.info(_("Time in update_hydro() : %s"), tf-t0)
1580
+ logging.info(_("curParam = %s"), self.curParams_vec_F)
1581
+ logging.info(_("All timeDelays = %s"), self.myCases[0].refCatchment.get_all_timeDelay())
1474
1582
  tf = time_mod.process_time()
1475
- print("Time in update_hydro() : ", tf-t0)
1583
+ logging.info(_("Time in update_hydro() : %s"), tf-t0)
1476
1584
  return isOk
1477
1585
 
1478
1586
 
@@ -1583,32 +1691,32 @@ class Optimisation(wx.Frame):
1583
1691
  self.saParam.change_param("Lowest values", paramName, 0.0)
1584
1692
  else:
1585
1693
  if float(cur_param) != 0.0:
1586
- logging.warning("The parameters applied to timeDelays are different than the ones recommanded!")
1587
- logging.warning("This procedure can be dangerous in semi distributed optimisation! Do it at your own risk!")
1694
+ logging.warning(_("The parameters applied to timeDelays are different than the ones recommanded!"))
1695
+ logging.warning(_("This procedure can be dangerous in semi distributed optimisation! Do it at your own risk!"))
1588
1696
 
1589
1697
  cur_param = self.saParam.get_param("Highest values",paramName)
1590
1698
  if cur_param is None:
1591
1699
  self.saParam.change_param("Highest values", paramName, 5.0*24.0*3600.0)
1592
1700
  else:
1593
1701
  if float(cur_param) != 5.0*24.0*3600.0:
1594
- logging.warning("The parameters applied to timeDelays are different than the ones recommanded!")
1595
- logging.warning("This procedure can be dangerous in semi distributed optimisation! Do it at your own risk!")
1702
+ logging.warning(_("The parameters applied to timeDelays are different than the ones recommanded!"))
1703
+ logging.warning(_("This procedure can be dangerous in semi distributed optimisation! Do it at your own risk!"))
1596
1704
 
1597
1705
  cur_param = self.saParam.get_param("Steps",paramName)
1598
1706
  if cur_param is None:
1599
1707
  self.saParam.change_param("Steps", paramName, self.myCases[idLauncher].refCatchment.deltaT)
1600
1708
  else:
1601
1709
  if float(cur_param) != self.myCases[idLauncher].refCatchment.deltaT:
1602
- logging.warning("The parameters applied to timeDelays are different than the ones recommanded!")
1603
- logging.warning("This procedure can be dangerous in semi distributed optimisation! Do it at your own risk!")
1710
+ logging.warning(_("The parameters applied to timeDelays are different than the ones recommanded!"))
1711
+ logging.warning(_("This procedure can be dangerous in semi distributed optimisation! Do it at your own risk!"))
1604
1712
 
1605
1713
  cur_param = self.saParam.get_param("Initial parameters",paramName)
1606
1714
  if cur_param is None:
1607
1715
  self.saParam.change_param("Initial parameters", paramName, 1.0*3600.0)
1608
1716
  else:
1609
1717
  if float(cur_param) != 1.0*3600.0:
1610
- logging.warning("The parameters applied to timeDelays are different than the ones recommanded!")
1611
- logging.warning("This procedure can be dangerous in semi distributed optimisation! Do it at your own risk!")
1718
+ logging.warning(_("The parameters applied to timeDelays are different than the ones recommanded!"))
1719
+ logging.warning(_("This procedure can be dangerous in semi distributed optimisation! Do it at your own risk!"))
1612
1720
 
1613
1721
  else:
1614
1722
  self.nbParams = nbParamsModel
@@ -1828,7 +1936,7 @@ class Optimisation(wx.Frame):
1828
1936
  stationOut = sortJct[iOpti]
1829
1937
  compareFileName = readDict[stationOut]
1830
1938
  # Copy the correct compare.txt file
1831
- shutil.copyfile(os.path.join(self.workingDir,compareFileName), os.path.join(self.workingDir,"compare.txt"))
1939
+ shutil.copyfile(self.workingDir / compareFileName, self.workingDir /"compare.txt")
1832
1940
  # Save the name of the station that will be the output
1833
1941
  curCatch.define_station_out(stationOut)
1834
1942
  # Activate all the useful subs and write it in the param file
@@ -1881,7 +1989,7 @@ class Optimisation(wx.Frame):
1881
1989
  stationOut = sortJct[iOpti]
1882
1990
  compareFileName = readDict[stationOut]
1883
1991
  # Copy the correct compare.txt file
1884
- shutil.copyfile(os.path.join(self.workingDir,compareFileName), os.path.join(self.workingDir,"compare.txt"))
1992
+ shutil.copyfile(self.workingDir / compareFileName, self.workingDir /"compare.txt")
1885
1993
  # Save the name of the station that will be the output
1886
1994
  curCatch.define_station_out(stationOut)
1887
1995
  # Activate all the useful subs and write it in the param file
@@ -1927,7 +2035,7 @@ class Optimisation(wx.Frame):
1927
2035
 
1928
2036
  if format=="rpt":
1929
2037
  for cur_file in all_names:
1930
- optimFile = os.path.join(self.workingDir, cur_file+".rpt")
2038
+ optimFile = self.workingDir / (cur_file+".rpt")
1931
2039
 
1932
2040
  try:
1933
2041
  with open(optimFile, newline = '') as fileID:
@@ -1955,7 +2063,7 @@ class Optimisation(wx.Frame):
1955
2063
 
1956
2064
  elif format==".dat":
1957
2065
  for cur_file in all_names:
1958
- optimFile = os.path.join(self.workingDir, cur_file+".rpt.dat")
2066
+ optimFile = self.workingDir / (cur_file+".rpt.dat")
1959
2067
  isOk, optimFile = check_path(optimFile)
1960
2068
  if isOk>0:
1961
2069
  allData = read_bin(self.workingDir, cur_file+".rpt.dat", uniform_format=8)
@@ -2140,18 +2248,18 @@ class Optimisation(wx.Frame):
2140
2248
  if intervals is None:
2141
2249
  self.all_intervals = self._read_opti_intervals(idLauncher=idLauncher)
2142
2250
 
2143
- compare_file = os.path.join(self.workingDir,"compare.how.param")
2251
+ compare_file = self.workingDir / "compare.how.param"
2144
2252
 
2145
2253
  # In case of a problem, the initial compare file is copied
2146
- compare_file_cp = os.path.join(self.workingDir,"compare.how_"+suffix+"_tmp.param")
2254
+ compare_file_cp = self.workingDir / ("compare.how_"+suffix+"_tmp.param")
2147
2255
  isOk, compare_file_cp = check_path(compare_file_cp)
2148
2256
  if isOk<0 and stationOut=="":
2149
- compare_file_cp = os.path.join(self.workingDir,"compare.how_"+suffix+".param")
2257
+ compare_file_cp = self.workingDir / ("compare.how_"+suffix+".param")
2150
2258
  shutil.copyfile(compare_file, compare_file_cp)
2151
- print("The following file has been copied : ", compare_file_cp)
2259
+ logging.info(_("The following file has been copied : "), compare_file_cp)
2152
2260
  else:
2153
2261
  shutil.copyfile(compare_file, compare_file_cp)
2154
- print("The following file has been copied : ", compare_file_cp)
2262
+ logging.info(_("The following file has been copied : %s"), compare_file_cp)
2155
2263
 
2156
2264
  if self.all_intervals is None:
2157
2265
  return -1
@@ -2192,16 +2300,17 @@ class Optimisation(wx.Frame):
2192
2300
  cur_basin = cur_ref.catchmentDict[keyBasin]
2193
2301
 
2194
2302
  # Select the optimisation intervals that are relevant according to the available measures
2195
- effective_intv = [interv for interv in all_intervals if interv[0]>=cur_basin.dateBegin and interv[1]<=cur_basin.dateEnd]
2303
+ # effective_intv = [interv for interv in all_intervals if interv[0]>=cur_basin.dateBegin and interv[1]<=cur_basin.dateEnd]
2304
+ effective_intv = self._intersect_intervals(all_intervals, (cur_basin.dateBegin, cur_basin.dateEnd))
2196
2305
  if filter_nan:
2197
2306
  effective_intv = self._define_intervals_with_nan_measures(effective_intv, self.compareSubBasins,
2198
2307
  idLauncher=idLauncher, stationOut=stationOut)
2199
-
2308
+ effective_intv = self._define_intervals_with_nan_inlets(effective_intv, {cur_basin.name: cur_basin},
2309
+ idLauncher=idLauncher, stationOut=stationOut)
2200
2310
  return effective_intv
2201
2311
 
2202
2312
 
2203
- def _define_intervals_with_nan_measures(self, intervals: list[tuple[datetime.datetime, datetime.datetime]], measures: dict[str, SubBasin],
2204
- idLauncher: int = 0, stationOut: str = ""):
2313
+ def _define_intervals_with_ts(self, intervals: list[tuple[datetime.datetime, datetime.datetime]], time:np.ndarray, ts:np.ndarray, idLauncher: int = 0):
2205
2314
  """
2206
2315
  Defines new intervals excluding all NaN measures based on the given intervals and measures dictionary.
2207
2316
  For instance, if there is continuous NaN measures within a given interval, the function will split
@@ -2209,7 +2318,7 @@ class Optimisation(wx.Frame):
2209
2318
 
2210
2319
  Args:
2211
2320
  intervals (list[tuple[datetime.datetime, datetime.datetime]]): A list of intervals represented as tuples of start and end datetime objects.
2212
- measures (dict[str, SubBasin]): A dictionary of measures where the keys are station names and the values are SubBasin objects.
2321
+ ts (dict[str, SubBasin]): A dictionary of time series where the keys are station names and the values are vectors of numpy array.
2213
2322
  idLauncher (int, optional): The id of the launcher. Defaults to 0.
2214
2323
  stationOut (str, optional): The station name. Defaults to "".
2215
2324
 
@@ -2220,15 +2329,9 @@ class Optimisation(wx.Frame):
2220
2329
  None
2221
2330
 
2222
2331
  """
2223
- if stationOut not in measures:
2224
- logging.error("The stationOut is not in the measures dictionary!")
2225
- return None
2226
2332
 
2227
- cur_el = measures[stationOut]
2228
- hydro = cur_el.get_myHydro()
2229
- time = cur_el.time
2230
2333
  # get the indices of the nan values
2231
- non_nan_locations = ~np.isnan(hydro)
2334
+ non_nan_locations = ~np.isnan(ts)
2232
2335
  within_intervals = np.sum(
2233
2336
  [(time >= datetime.datetime.timestamp(interv[0])) *
2234
2337
  (time <= datetime.datetime.timestamp(interv[1]))
@@ -2257,6 +2360,68 @@ class Optimisation(wx.Frame):
2257
2360
  return interv_dates
2258
2361
 
2259
2362
 
2363
+ def _define_intervals_with_nan_inlets(self, intervals: list[tuple[datetime.datetime, datetime.datetime]], measures: dict[str, SubBasin],
2364
+ idLauncher: int = 0, stationOut: str = ""):
2365
+ """
2366
+ Defines new intervals excluding all NaN measures based on the given intervals and measures dictionary.
2367
+ For instance, if there is continuous NaN measures within a given interval, the function will split
2368
+ that interval into smaller that do not contain NaN measures.
2369
+
2370
+ Args:
2371
+ intervals (list[tuple[datetime.datetime, datetime.datetime]]): A list of intervals represented as tuples of start and end datetime objects.
2372
+ measures (dict[str, SubBasin]): A dictionary of measures where the keys are station names and the values are SubBasin objects.
2373
+ idLauncher (int, optional): The id of the launcher. Defaults to 0.
2374
+ stationOut (str, optional): The station name. Defaults to "".
2375
+
2376
+ Returns:
2377
+ list[tuple[datetime.datetime, datetime.datetime]]: A list of intervals with NaN measures.
2378
+
2379
+ Raises:
2380
+ None
2381
+
2382
+ """
2383
+ if stationOut not in measures:
2384
+ logging.error("The stationOut is not in the measures dictionary!")
2385
+ return None
2386
+
2387
+ cur_el = measures[stationOut]
2388
+ hydro = cur_el.get_inlets()
2389
+ time = cur_el.time
2390
+
2391
+ return self._define_intervals_with_ts(intervals, time, hydro, idLauncher=idLauncher)
2392
+
2393
+
2394
+ def _define_intervals_with_nan_measures(self, intervals: list[tuple[datetime.datetime, datetime.datetime]], measures: dict[str, SubBasin],
2395
+ idLauncher: int = 0, stationOut: str = ""):
2396
+ """
2397
+ Defines new intervals excluding all NaN measures based on the given intervals and measures dictionary.
2398
+ For instance, if there is continuous NaN measures within a given interval, the function will split
2399
+ that interval into smaller that do not contain NaN measures.
2400
+
2401
+ Args:
2402
+ intervals (list[tuple[datetime.datetime, datetime.datetime]]): A list of intervals represented as tuples of start and end datetime objects.
2403
+ measures (dict[str, SubBasin]): A dictionary of measures where the keys are station names and the values are SubBasin objects.
2404
+ idLauncher (int, optional): The id of the launcher. Defaults to 0.
2405
+ stationOut (str, optional): The station name. Defaults to "".
2406
+
2407
+ Returns:
2408
+ list[tuple[datetime.datetime, datetime.datetime]]: A list of intervals with NaN measures.
2409
+
2410
+ Raises:
2411
+ None
2412
+
2413
+ """
2414
+ if stationOut not in measures:
2415
+ logging.error("The stationOut is not in the measures dictionary!")
2416
+ return None
2417
+
2418
+ cur_el = measures[stationOut]
2419
+ hydro = cur_el.get_myHydro()
2420
+ time = cur_el.time
2421
+
2422
+ return self._define_intervals_with_ts(intervals, time, hydro, idLauncher=idLauncher)
2423
+
2424
+
2260
2425
  def save_opti_dates_to_file(self, opti_dates:list[tuple[datetime.datetime,datetime.datetime]]):
2261
2426
  """
2262
2427
  Here the procedure is saving the intervals of dates for calibration in the compare.how.param
@@ -2321,7 +2486,7 @@ class Optimisation(wx.Frame):
2321
2486
  return
2322
2487
  for i in range(self.nbParams):
2323
2488
  self.saParam.change_param("Initial parameters", " ".join(["Parameter",str(i+1)]), init_params[i])
2324
- print("Reset init params : ", init_params)
2489
+ logging.info(_("Reset init params : %s"), init_params)
2325
2490
  self.saParam.SavetoFile(None)
2326
2491
  self.saParam.Reload(None)
2327
2492
 
@@ -2509,7 +2674,7 @@ class Optimisation(wx.Frame):
2509
2674
  header = f"{data.shape[0]:d}\t{data.shape[1]:d}"
2510
2675
  # Write to file
2511
2676
  np.savetxt(
2512
- os.path.join(self.workingDir, "compare.txt"),
2677
+ self.workingDir / "compare.txt",
2513
2678
  data,
2514
2679
  header=header,
2515
2680
  fmt=["%d", "%e"],
@@ -2580,7 +2745,7 @@ class Optimisation(wx.Frame):
2580
2745
  # Activate the writing of the internal variables
2581
2746
  curCatch.activate_all_internal_variables()
2582
2747
  # Prepare the Excel writer
2583
- writer_tot = pd.ExcelWriter(os.path.join(self.workingDir, "all_best_tests.xlsx"), engine = 'xlsxwriter')
2748
+ writer_tot = pd.ExcelWriter(self.workingDir / "all_best_tests.xlsx", engine = 'xlsxwriter')
2584
2749
 
2585
2750
  for iOpti in range(len(sortJct)):
2586
2751
  stationOut = sortJct[iOpti]
@@ -2592,9 +2757,6 @@ class Optimisation(wx.Frame):
2592
2757
  curCatch.define_station_out(stationOut)
2593
2758
  # Activate all the useful subs and write it in the param file
2594
2759
  curCatch.activate_usefulSubs(blockJunction=doneList, onlyItself=onlyOwnSub)
2595
- # Select correct calibration intervals -> remove the intervals with NaN
2596
- cur_intervals = self.select_opti_intervals(all_intervals=all_intervals, stationOut=stationOut, filter_nan=True)
2597
- self.save_opti_dates_to_file(cur_intervals)
2598
2760
  # Rename the result file
2599
2761
  self.optiParam.change_param("Optimizer", "fname", stationOut)
2600
2762
  self.optiParam.SavetoFile(None)
@@ -2604,6 +2766,9 @@ class Optimisation(wx.Frame):
2604
2766
  self.prepare_calibration_timeDelay(stationOut=stationOut)
2605
2767
  # Reload the useful modules
2606
2768
  self.reload_hydro(idCompar=0, fromStation=stationOut, lastLevel=previousLevel, updateAll=True)
2769
+ # Select correct calibration intervals -> remove the intervals with NaN
2770
+ cur_intervals = self.select_opti_intervals(all_intervals=all_intervals, stationOut=stationOut, filter_nan=True)
2771
+ self.save_opti_dates_to_file(cur_intervals)
2607
2772
  ## =======
2608
2773
  ## Init
2609
2774
  ## =======
@@ -2620,7 +2785,7 @@ class Optimisation(wx.Frame):
2620
2785
  cur_p = all_params[i, :-1]
2621
2786
  cur_obj = all_params[i, -1]
2622
2787
  cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
2623
- print("cur_obj : ", cur_obj, " ; cur_obj2 : ", cur_obj2)
2788
+ logging.info(_("cur_obj : %s ; cur_obj2 : %s"), cur_obj, cur_obj2)
2624
2789
  if cur_obj != cur_obj2:
2625
2790
  logging.error("The objective function is not the same as the one computed by the model!")
2626
2791
  logging.error("cur_obj : "+str(cur_obj)+" ; cur_obj2 : "+str(cur_obj2))
@@ -2652,7 +2817,7 @@ class Optimisation(wx.Frame):
2652
2817
 
2653
2818
  cur_df = pd.DataFrame(all_frac, columns=var_names)
2654
2819
  # write first the tempory results for each station
2655
- writer_stat = pd.ExcelWriter(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
2820
+ writer_stat = pd.ExcelWriter(self.workingDir / (stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
2656
2821
  cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
2657
2822
  writer_stat.sheets[stationOut].autofit()
2658
2823
  writer_stat.close()
@@ -2682,7 +2847,7 @@ class Optimisation(wx.Frame):
2682
2847
 
2683
2848
 
2684
2849
  def get_best_params(self, stationOut:str,
2685
- criterion:str="Nash", quantile:float=0.99, std:float=0.05, eps:float=0.2, rmv_near_max=1e-4, nb_rand_close:int=10,
2850
+ criterion:str="Nash", quantile:float=None, std:float=None, eps:float=0.2, rmv_near_max=None, nb_rand_close:int=10,
2686
2851
  objective_fct:bool= True, apply_clustering:bool=False, objective_weight:float=1.0):
2687
2852
  from sklearn.cluster import DBSCAN
2688
2853
  """
@@ -2701,10 +2866,19 @@ class Optimisation(wx.Frame):
2701
2866
 
2702
2867
  best_objfct = self.collect_optim()[-1]
2703
2868
  all_params, all_obj_fct = self.read_all_attempts_SA(format=".dat", all_attempts=True)
2869
+ if quantile is not None:
2870
+ quantile_cond = (all_obj_fct > np.quantile(all_obj_fct, quantile))
2871
+ else:
2872
+ quantile_cond = np.ones_like(all_obj_fct, dtype=bool)
2873
+ if std is not None:
2874
+ std_cond = (all_obj_fct > best_objfct*(1-std))
2875
+ else:
2876
+ std_cond = np.ones_like(all_obj_fct, dtype=bool)
2877
+ if rmv_near_max is not None:
2878
+ tooclose_cond = (all_obj_fct < best_objfct*(1-rmv_near_max)) | (all_obj_fct == best_objfct)
2879
+ else:
2880
+ tooclose_cond = np.ones_like(all_obj_fct, dtype=bool)
2704
2881
 
2705
- quantile_cond = (all_obj_fct > np.quantile(all_obj_fct, quantile))
2706
- std_cond = (all_obj_fct > best_objfct*(1-std))
2707
- tooclose_cond = (all_obj_fct < best_objfct*(1-rmv_near_max)) | (all_obj_fct == best_objfct)
2708
2882
  all_cond = np.where(quantile_cond & std_cond & tooclose_cond)[0]
2709
2883
  eff_params = all_params[all_cond]
2710
2884
  eff_obj = all_obj_fct[all_cond]
@@ -2723,13 +2897,19 @@ class Optimisation(wx.Frame):
2723
2897
  tot_add_params = np.column_stack((selected_params, selected_obj))
2724
2898
  # Add the selected parameters to the eff_params
2725
2899
  eff_params = np.vstack((eff_params, tot_add_params))
2726
- # add to the eff_params
2727
2900
 
2728
2901
  # In this part we filter abd remove the parameters that are almost equivalent
2729
2902
  # To do so, we use the DBSCAN clustering algorithm to group the parameters that are close to each other
2730
2903
  # and only keep the set of parameter that has the best Nash-Sutcliffe efficiency per group
2731
2904
  # The parameters that are not grouped are considered had "particular" and are still kept in the final set
2732
2905
  if apply_clustering:
2906
+ # If the number of lines of the parameters is higher than 500_000, we select a random sample of 500_000 lines
2907
+ if np.shape(eff_params)[0] > 100_000:
2908
+ logging.warning("The number of parameters is higher than 500_000. A random sample of 500_000 parameters will be selected.")
2909
+ # Select a random sample of 500_000 lines
2910
+ idx = np.random.choice(np.shape(eff_params)[0], size=100_000, replace=False)
2911
+ eff_params = eff_params[idx]
2912
+ return eff_params
2733
2913
  # "Normalise" or scale btw [0;1] the parameter vector to make the clustering more efficient
2734
2914
  min_param = np.min(eff_params, axis=0)
2735
2915
  max_param = np.max(eff_params, axis=0)
@@ -2737,8 +2917,8 @@ class Optimisation(wx.Frame):
2737
2917
  # Add weight to the objective function to make it more important in the clustering
2738
2918
  # FIXME : to be improved
2739
2919
  norm_params[:,-1] = norm_params[:,-1]*objective_weight
2740
- # Apply the DBSCAN clustering algorithm to group the parameters
2741
- db = DBSCAN(eps=eps).fit(norm_params)
2920
+ # Apply the DBSCAN clustering algorithm to group the parameters and conversion to float32 to avoid memory issues
2921
+ db = DBSCAN(eps=eps,metric='euclidean').fit(norm_params.astype(np.float32))
2742
2922
  labels = db.labels_
2743
2923
  # Extraction of the number of groups and particular cases
2744
2924
  n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
@@ -2785,7 +2965,8 @@ class Optimisation(wx.Frame):
2785
2965
  for el in intervals:
2786
2966
  date_i = datetime.datetime.timestamp(el[0])
2787
2967
  date_f = datetime.datetime.timestamp(el[1])
2788
- interv += (curCatch.time>=date_i) & (curCatch.time<=date_f)
2968
+ interv += (curCatch.time>=date_i) & (curCatch.time<=date_f) & \
2969
+ (~np.isnan(compare)) & (~np.isnan(simul)) & (~np.isinf(simul))
2789
2970
  else:
2790
2971
  interv = np.ones(len(curCatch.time), dtype=bool)
2791
2972
 
@@ -2839,7 +3020,23 @@ class Optimisation(wx.Frame):
2839
3020
  return cur_fracts
2840
3021
 
2841
3022
  def _get_flow_fractions(self, idLauncher:int=0, stationOut:str="",
2842
- intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]) -> dict[list[str], list[float]]:
3023
+ intervals:list[tuple[datetime.datetime, datetime.datetime]]=[],
3024
+ from_full_matrix:tuple[np.ndarray, np.ndarray]=None) -> dict[list[str], list[float]]:
3025
+ """This function retrieves the flow fractions for a given sub-basin.
3026
+ It can also take a "full matrix" of internal variables to compute the fractions.
3027
+ This "full matrix" is a tuple containing the ids of the internal variables and the matrix itself.
3028
+
3029
+ :param idLauncher: ID of the launcher, defaults to 0
3030
+ :type idLauncher: int, optional
3031
+ :param stationOut: Name of the outlet station, defaults to ""
3032
+ :type stationOut: str, optional
3033
+ :param intervals: List of start date and end date for the intervals to evaluate the flow fractions, defaults to []
3034
+ :type intervals: list[tuple[datetime.datetime, datetime.datetime]], optional
3035
+ :param from_full_matrix: Containing the matrix (nb_t, nb_iv) of I.V. coming from the Fortran code directly (not read in files), defaults to None
3036
+ :type from_full_matrix: tuple[np.ndarray, np.ndarray], optional
3037
+ :return: Dictionary containing the flow fractions for each type of flow with their names as keys and the fractions as values.
3038
+ :rtype: dict[list[str], list[float]]
3039
+ """
2843
3040
 
2844
3041
  curCatch:Catchment = self.myCases[idLauncher].refCatchment
2845
3042
  cur_key = curCatch.get_key_catchmentDict(stationOut)
@@ -2847,12 +3044,39 @@ class Optimisation(wx.Frame):
2847
3044
  if type(curBasin) != SubBasin:
2848
3045
  logging.warning("The current module is not a SubBasin object!")
2849
3046
  return None
2850
- cur_fracts = curBasin.get_flow_fractions(interval=intervals, summary="mean")
3047
+ if from_full_matrix is not None:
3048
+ ids, iv_matrix = from_full_matrix
3049
+ all_f = mc.MODELS_VAR[curBasin.model].get_dict_from_matrix_and_ids(iv_matrix, list(ids),type_of_var=iv.FINAL_OUT_VAR)
3050
+ all_f.update(mc.MODELS_VAR[curBasin.model].get_dict_from_matrix_and_ids(iv_matrix, list(ids),type_of_var=iv.DEFAULT_VAR))
3051
+ else:
3052
+ all_f = {}
3053
+ cur_fracts = curBasin.get_flow_fractions(all_f=all_f, interval=intervals, summary="mean")
3054
+ return cur_fracts
3055
+
3056
+ def _get_max_flow_fractions(self, idLauncher:int=0, stationOut:str="",
3057
+ intervals:list[tuple[datetime.datetime, datetime.datetime]]=[],
3058
+ from_full_matrix:tuple[np.ndarray, np.ndarray]=None) -> dict[list[str], list[float]]:
3059
+
3060
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
3061
+ cur_key = curCatch.get_key_catchmentDict(stationOut)
3062
+ curBasin: SubBasin = curCatch.catchmentDict[cur_key]
3063
+ if type(curBasin) != SubBasin:
3064
+ logging.warning("The current module is not a SubBasin object!")
3065
+ return None
3066
+ if from_full_matrix is not None:
3067
+ ids, iv_matrix = from_full_matrix
3068
+ all_f = mc.MODELS_VAR[curBasin.model].get_dict_from_matrix_and_ids(iv_matrix, list(ids),type_of_var=iv.FINAL_OUT_VAR)
3069
+ all_f.update(mc.MODELS_VAR[curBasin.model].get_dict_from_matrix_and_ids(iv_matrix, list(ids),type_of_var=iv.DEFAULT_VAR))
3070
+ else:
3071
+ all_f = {}
3072
+ cur_fracts = curBasin.get_flow_fractions(all_f=all_f, interval=intervals, summary="max")
3073
+ cur_fracts = {"max"+key: value for key, value in cur_fracts.items()}
2851
3074
  return cur_fracts
2852
3075
 
2853
3076
 
2854
3077
  def _get_punctual_reservoir_fractions(self, eval_date:datetime.datetime,
2855
- idLauncher:int=0, stationOut:str="") -> dict[list[str], list[float]]:
3078
+ idLauncher:int=0, stationOut:str="",
3079
+ from_full_matrix:tuple[np.ndarray, np.ndarray]=None) -> dict[list[str], list[float]]:
2856
3080
 
2857
3081
  curCatch:Catchment = self.myCases[idLauncher].refCatchment
2858
3082
  cur_key = curCatch.get_key_catchmentDict(stationOut)
@@ -2860,10 +3084,15 @@ class Optimisation(wx.Frame):
2860
3084
  if type(curBasin) != SubBasin:
2861
3085
  logging.warning("The current module is not a SubBasin object!")
2862
3086
  return None
3087
+ if from_full_matrix is not None:
3088
+ ids, iv_matrix = from_full_matrix
3089
+ all_iv = mc.MODELS_VAR[curBasin.model].get_dict_from_matrix_and_ids(iv_matrix, list(ids),type_of_var=iv.IV_VAR)
3090
+ else:
3091
+ all_iv = {}
2863
3092
  linked_params = mc.MODELS_VAR[curBasin.model].get_all_linked_params()
2864
3093
  i_params = self._get_key_from_type_all_parameters(list(linked_params.values()))
2865
3094
  max_params = {var_name: self.myParams[i_params[param_id]]["value"] for var_name, param_id in linked_params.items()}
2866
- cur_fracts = curBasin.get_iv_fractions_one_date(max_params=max_params, eval_date=eval_date)
3095
+ cur_fracts = curBasin.get_iv_fractions_one_date(all_iv=all_iv,max_params=max_params, eval_date=eval_date)
2867
3096
  return cur_fracts
2868
3097
 
2869
3098
 
@@ -2938,7 +3167,7 @@ class Optimisation(wx.Frame):
2938
3167
 
2939
3168
  for iOpti in range(len(sortJct)):
2940
3169
  stationOut = sortJct[iOpti]
2941
- filename = os.path.join(self.workingDir, stationOut+"_tests.xlsx")
3170
+ filename = self.workingDir / (stationOut+"_tests.xlsx")
2942
3171
  if os.path.isfile(filename):
2943
3172
  df = pd.read_excel(filename, sheet_name=stationOut)
2944
3173
  # Plot the physical properties
@@ -2953,7 +3182,7 @@ class Optimisation(wx.Frame):
2953
3182
  ax.set_ylabel(y_label+" [-]")
2954
3183
  ax.set_title("Proportion of rain : "+stationOut)
2955
3184
  ax.legend()
2956
- fig.savefig(os.path.join(self.workingDir, "Equifinality_physical_prop_"+stationOut+".png"))
3185
+ fig.savefig( self.workingDir / ("Equifinality_physical_prop_"+stationOut+".png"))
2957
3186
  # Plot the physical property volumes
2958
3187
  fig, ax = plt.subplots()
2959
3188
  for cur_prop, cur_color in zip(physical_properties_vol, colors_properties):
@@ -2966,7 +3195,7 @@ class Optimisation(wx.Frame):
2966
3195
  ax.set_ylabel(y_label+" [-]")
2967
3196
  ax.set_title("Proportion of rain volume : "+stationOut)
2968
3197
  ax.legend()
2969
- fig.savefig(os.path.join(self.workingDir, "Equifinality_physical_prop_volumes_"+stationOut+".png"))
3198
+ fig.savefig(self.workingDir / ("Equifinality_physical_prop_volumes_"+stationOut+".png"))
2970
3199
  # Plot the Probability of exceedance
2971
3200
  cur_color = colors_properties[0]
2972
3201
  x_label = "P. of exceedance"
@@ -2977,7 +3206,7 @@ class Optimisation(wx.Frame):
2977
3206
  ax.set_ylabel(y_label+" [-]")
2978
3207
  ax.set_title("Probability of Q_sim > Q_meas : "+stationOut)
2979
3208
  ax.legend()
2980
- fig.savefig(os.path.join(self.workingDir, "Equifinality_prob_excess_"+stationOut+".png"))
3209
+ fig.savefig(self.workingDir / ("Equifinality_prob_excess_"+stationOut+".png"))
2981
3210
  # Plot Q_sim/Q_max
2982
3211
  x_label = "Qmax_simul/Q_max_measure"
2983
3212
  fig, ax = plt.subplots()
@@ -2987,7 +3216,7 @@ class Optimisation(wx.Frame):
2987
3216
  ax.set_ylabel(y_label+" [-]")
2988
3217
  ax.set_title("Peak analysis : "+stationOut)
2989
3218
  ax.legend()
2990
- fig.savefig(os.path.join(self.workingDir, "Equifinality_peaks_ratio_"+stationOut+".png"))
3219
+ fig.savefig(self.workingDir / ("Equifinality_peaks_ratio_"+stationOut+".png"))
2991
3220
  # Plot % of the max runoff
2992
3221
  x_label = "% max runoff"
2993
3222
  fig, ax = plt.subplots()
@@ -2997,7 +3226,7 @@ class Optimisation(wx.Frame):
2997
3226
  ax.set_ylabel(y_label+" [-]")
2998
3227
  ax.set_title("Max runoff [%] : "+stationOut)
2999
3228
  ax.legend()
3000
- fig.savefig(os.path.join(self.workingDir, "Equifinality_max_runoff_"+stationOut+".png"))
3229
+ fig.savefig(self.workingDir / ("Equifinality_max_runoff_"+stationOut+".png"))
3001
3230
  else:
3002
3231
  logging.error("The file "+filename+" does not exist!")
3003
3232
 
@@ -3017,13 +3246,13 @@ class Optimisation(wx.Frame):
3017
3246
 
3018
3247
  for iOpti in range(len(sortJct)):
3019
3248
  stationOut = sortJct[iOpti]
3020
- filename = os.path.join(self.workingDir, stationOut+"_tests.xlsx")
3249
+ filename = self.workingDir / (stationOut+"_tests.xlsx")
3021
3250
  if os.path.isfile(filename):
3022
3251
  df = pd.read_excel(filename, sheet_name=stationOut)
3023
3252
  # Plot the physical properties
3024
3253
  fig, ax = plt.subplots()
3025
3254
  for cur_prop, cur_color in zip(physical_properties, colors_properties):
3026
- cur_columns = [col for col in df.columns if cur_prop in col.replace(" ", "")]
3255
+ cur_columns = [col for col in df.columns if cur_prop in col.replace(" ", "").lower()]
3027
3256
  if cur_columns != []:
3028
3257
  corr_prop = cur_columns[0]
3029
3258
  ax.scatter(df.loc[:,corr_prop], df.loc[:,y_label], s=0.5, c=cur_color,
@@ -3032,7 +3261,7 @@ class Optimisation(wx.Frame):
3032
3261
  ax.set_ylabel(y_label+" [-]")
3033
3262
  ax.set_title("Proportion of rain : "+stationOut)
3034
3263
  ax.legend()
3035
- fig.savefig(os.path.join(self.workingDir, "Equifinality_physical_prop_"+stationOut+".png"))
3264
+ fig.savefig(self.workingDir / ("Equifinality_physical_prop_"+stationOut+".png"))
3036
3265
  # Plot the Probability of exceedance
3037
3266
  cur_color = colors_properties[0]
3038
3267
  x_label = "P. of exceedance"
@@ -3043,7 +3272,7 @@ class Optimisation(wx.Frame):
3043
3272
  ax.set_ylabel(y_label+" [-]")
3044
3273
  ax.set_title("Probability of Q_sim > Q_meas : "+stationOut)
3045
3274
  ax.legend()
3046
- fig.savefig(os.path.join(self.workingDir, "Equifinality_prob_excess_"+stationOut+".png"))
3275
+ fig.savefig(self.workingDir / ("Equifinality_prob_excess_"+stationOut+".png"))
3047
3276
  # Plot Q_sim/Q_max
3048
3277
  x_label = "Qmax_simul/Q_max_measure"
3049
3278
  fig, ax = plt.subplots()
@@ -3053,7 +3282,7 @@ class Optimisation(wx.Frame):
3053
3282
  ax.set_ylabel(y_label+" [-]")
3054
3283
  ax.set_title("Peak analysis : "+stationOut)
3055
3284
  ax.legend()
3056
- fig.savefig(os.path.join(self.workingDir, "Equifinality_peaks_ratio_"+stationOut+".png"))
3285
+ fig.savefig(self.workingDir / ("Equifinality_peaks_ratio_"+stationOut+".png"))
3057
3286
 
3058
3287
  else:
3059
3288
  logging.error("The file "+filename+" does not exist!")
@@ -3068,7 +3297,7 @@ class Optimisation(wx.Frame):
3068
3297
  launcherDir = self.optiParam.get_param("Cases","dir_"+str(i+1))
3069
3298
  isOk, launcherDir = check_path(launcherDir, prefix=self.workingDir, applyCWD=True)
3070
3299
  if isOk<0:
3071
- print("ERROR : in path of launcherDir")
3300
+ logging.error("ERROR : in path of launcherDir")
3072
3301
  newCase.read_param(launcherDir, copyDefault=False, callback=self.update_parameters_launcher)
3073
3302
  # FIXME TO CHANGE when seperation with the GUI
3074
3303
  if self.wx_exists:
@@ -3085,19 +3314,19 @@ class Optimisation(wx.Frame):
3085
3314
  refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
3086
3315
  isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
3087
3316
  if isOk<0:
3088
- print("ERROR : in path of launcherDir")
3089
- newCase.mydro = HydrologyModel(dir=refDir)
3317
+ logging.error("ERROR : in path of launcherDir")
3318
+ newCase.mydro = HydrologyModel(directory=refDir)
3090
3319
  newCase.mydro.Hide()
3091
3320
  self.Bind(wx.EVT_MENU, newCase.show_mydro, guiHydroCase)
3092
3321
  curCase = paramMenu.Append(newId, curName, caseMenu)
3093
3322
  else:
3094
- print("WARNING : this scenario was not implemented yet. This might induce an error!")
3323
+ logging.warning(_("WARNING : this scenario was not implemented yet. This might induce an error!"))
3095
3324
  # iItem =
3096
3325
  curCase = paramMenu.Replace(iItem)
3097
3326
  else:
3098
3327
  refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
3099
3328
  isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
3100
- newCase.mydro = HydrologyModel(dir=refDir)
3329
+ newCase.mydro = HydrologyModel(directory=refDir)
3101
3330
 
3102
3331
  self.Bind(wx.EVT_MENU, newCase.show_launcherParam, curCase)
3103
3332
  newCase.idMenuItem = newId
@@ -3158,7 +3387,7 @@ class Optimisation(wx.Frame):
3158
3387
  all_outlets[stationOut][i,:] = curBasin.outFlow
3159
3388
  # Small test
3160
3389
  cur_obj = all_params[i, -1]
3161
- print("cur_obj : ", cur_obj, " ; cur_obj2 : ", cur_obj2)
3390
+ logging.info(_("cur_obj : %s ; cur_obj2 : %s"), cur_obj, cur_obj2)
3162
3391
  if cur_obj != cur_obj2:
3163
3392
  logging.error("The objective function is not the same as the one computed by the model!")
3164
3393
  logging.error("cur_obj : "+str(cur_obj)+" ; cur_obj2 : "+str(cur_obj2))
@@ -3217,7 +3446,7 @@ class Optimisation(wx.Frame):
3217
3446
 
3218
3447
  # FIXME : this function has been dashed off -> functionnal but not well written!!
3219
3448
  # TODO : to improve !!!!!!
3220
- def launch_models_propertie_with_Nash(self, event, idLauncher:int=0, idOpti:int=1, quantile_Nash:float=0.01, std_Nash:float=0.03, clustering_Nash:bool=True,
3449
+ def launch_models_propertie_with_Nash_old(self, event, idLauncher:int=0, idOpti:int=1, quantile_Nash:float=0.01, std_Nash:float=0.03, clustering_Nash:bool=True,
3221
3450
  save_every:int=100, restart_from_file:bool=True):
3222
3451
  """
3223
3452
  Analyse the properties of the model and compare them with the Nash coefficient.
@@ -3248,7 +3477,7 @@ class Optimisation(wx.Frame):
3248
3477
  # Activate the writing of the internal variables
3249
3478
  curCatch.activate_all_internal_variables()
3250
3479
  # Prepare the Excel writer
3251
- writer_tot = pd.ExcelWriter(os.path.join(self.workingDir, "all_best_tests.xlsx"), engine = 'xlsxwriter')
3480
+ writer_tot = pd.ExcelWriter(self.workingDir / "all_best_tests.xlsx", engine = 'xlsxwriter')
3252
3481
 
3253
3482
  for iOpti in range(len(sortJct)):
3254
3483
  stationOut = sortJct[iOpti]
@@ -3293,7 +3522,7 @@ class Optimisation(wx.Frame):
3293
3522
  cur_p = all_params[i, :-1]
3294
3523
  cur_obj = all_params[i, -1]
3295
3524
  cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
3296
- print("cur_obj : ", cur_obj, " ; cur_obj2 : ", cur_obj2)
3525
+ logging.info(_("cur_obj : %s ; cur_obj2 : %s"), cur_obj, cur_obj2)
3297
3526
  if cur_obj != cur_obj2:
3298
3527
  logging.error("The objective function is not the same as the one computed by the model!")
3299
3528
  logging.error("cur_obj : "+str(cur_obj)+" ; cur_obj2 : "+str(cur_obj2))
@@ -3301,6 +3530,7 @@ class Optimisation(wx.Frame):
3301
3530
  self.write_mesh_results_optimizer(idOpti=idOpti)
3302
3531
  # Save all the variables/evaluations desired
3303
3532
  frac_flow_dict = self._get_flow_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
3533
+ max_flow_dict = self._get_max_flow_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
3304
3534
  init_iv = self._get_punctual_reservoir_fractions(eval_date=cur_intervals[0][0], idLauncher=idLauncher, stationOut=stationOut)
3305
3535
  p_excess = self._get_exceedance(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
3306
3536
  max_sim_obs = self._get_ratio_max_sim_obs(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
@@ -3312,6 +3542,7 @@ class Optimisation(wx.Frame):
3312
3542
  cur_all_frac = (list(cur_p)
3313
3543
  + cur_timeDelays
3314
3544
  + list(frac_flow_dict.values())
3545
+ + list(max_flow_dict.values())
3315
3546
  + list(init_iv.values())
3316
3547
  + [p_excess, max_sim_obs, cur_obj])
3317
3548
  all_frac.append(cur_all_frac)
@@ -3321,30 +3552,203 @@ class Optimisation(wx.Frame):
3321
3552
  var_names = names \
3322
3553
  + list(all_timeDelays_str.keys()) \
3323
3554
  + list(frac_flow_dict.keys()) \
3555
+ + list(max_flow_dict.keys()) \
3324
3556
  + list(init_iv.keys()) \
3325
3557
  + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
3326
3558
  cur_df = pd.DataFrame(all_frac, columns=var_names)
3327
3559
  # write first the tempory results for each station
3328
- writer_stat = pd.ExcelWriter(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
3560
+ writer_stat = pd.ExcelWriter(self.workingDir / (stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
3329
3561
  cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
3330
3562
  writer_stat.sheets[stationOut].autofit()
3331
3563
  writer_stat.close()
3332
3564
 
3333
3565
  # Save the evaluations
3334
- var_names = names \
3335
- + list(all_timeDelays_str.keys()) \
3336
- + list(frac_flow_dict.keys()) \
3337
- + list(init_iv.keys()) \
3338
- + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
3339
- cur_df = pd.DataFrame(all_frac, columns=var_names)
3340
- # write first the tempory results for each station
3341
- writer_stat = pd.ExcelWriter(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
3342
- cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
3343
- writer_stat.sheets[stationOut].autofit()
3344
- writer_stat.close()
3345
- # write now the informations for all the stations in the same excel file
3346
- cur_df.to_excel(writer_tot, sheet_name=stationOut, columns=var_names)
3347
- writer_tot.sheets[stationOut].autofit()
3566
+ if(len(all_params))>0:
3567
+ var_names = names \
3568
+ + list(all_timeDelays_str.keys()) \
3569
+ + list(frac_flow_dict.keys()) \
3570
+ + list(max_flow_dict.keys()) \
3571
+ + list(init_iv.keys()) \
3572
+ + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
3573
+ cur_df = pd.DataFrame(all_frac, columns=var_names)
3574
+ # write first the tempory results for each station
3575
+ writer_stat = pd.ExcelWriter(self.workingDir / (stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
3576
+ cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
3577
+ writer_stat.sheets[stationOut].autofit()
3578
+ writer_stat.close()
3579
+ # write now the informations for all the stations in the same excel file
3580
+ cur_df.to_excel(writer_tot, sheet_name=stationOut, columns=var_names)
3581
+ writer_tot.sheets[stationOut].autofit()
3582
+
3583
+ ## =======
3584
+ ## =======
3585
+ # Collect the best parameters and their objective function(s)
3586
+ best_params = self.apply_optim(None)
3587
+ # Simulation with the best parameters
3588
+ self.compute_distributed_hydro_model()
3589
+ # Update myHydro of all effective subbasins to get the best configuration upstream
3590
+ curCatch.read_hydro_eff_subBasin()
3591
+ # Update timeDelays according to time wolf_array
3592
+ self.apply_timeDelay_dist(idOpti=idOpti, idLauncher=idLauncher, junctionKey=stationOut)
3593
+ # Update the outflows
3594
+ curCatch.update_hydro(idCompar=0)
3595
+
3596
+ # All upstream elements of a reference will be fixed
3597
+ doneList.append(stationOut)
3598
+ previousLevel = curCatch.levelOut
3599
+
3600
+ writer_tot.close()
3601
+ logging.info("The equifinality test is finished!")
3602
+
3603
+ # FIXME : this function has been dashed off -> functionnal but not well written!!
3604
+ # TODO : to improve !!!!!!
3605
+ def launch_models_propertie_with_Nash_old2(self, event, idLauncher:int=0, idOpti:int=1, quantile_Nash:float=0.01, std_Nash:float=0.03, clustering_Nash:bool=True,
3606
+ save_every:int=1000, restart_from_file:bool=True,
3607
+ intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]):
3608
+ """
3609
+ Analyse the properties of the model and compare them with the Nash coefficient.
3610
+
3611
+ Args:
3612
+ idLauncher (int, optional): The id of the launcher. Defaults to 0.
3613
+
3614
+ Returns:
3615
+ None
3616
+
3617
+ Raises:
3618
+ None
3619
+ """
3620
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
3621
+
3622
+ onlyOwnSub = self.optiParam.get_param("Semi-Distributed", "Own_SubBasin")
3623
+ if onlyOwnSub is None:
3624
+ onlyOwnSub = False
3625
+ doneList = []
3626
+ previousLevel = 1
3627
+ # Collect sort and save the compare stations
3628
+ self.set_compare_stations(idLauncher=idLauncher)
3629
+ sortJct = self.myStations
3630
+ # Get the initial number of intervals
3631
+ # -> these can evolve according to the measurement available at each station
3632
+ is_ok = self._save_opti_intervals()
3633
+ all_intervals = self.all_intervals
3634
+ # Activate the writing of the internal variables
3635
+ curCatch.activate_all_internal_variables()
3636
+ # Prepare the Excel writer
3637
+ writer_tot = pd.ExcelWriter(self.workingDir / "all_best_tests.xlsx", engine = 'xlsxwriter')
3638
+
3639
+ for iOpti in range(len(sortJct)):
3640
+ stationOut = sortJct[iOpti]
3641
+ logging.info("==================")
3642
+ logging.info("Station : "+stationOut)
3643
+ # Build the current compare.txt file and replace all nan values by 0.0
3644
+ self.save_current_compare_file(stationOut=stationOut)
3645
+ # Save the name of the station that will be the output
3646
+ curCatch.define_station_out(stationOut)
3647
+ # Activate all the useful subs and write it in the param file
3648
+ curCatch.activate_usefulSubs(blockJunction=doneList, onlyItself=onlyOwnSub)
3649
+ # Select correct calibration intervals -> remove the intervals with NaN
3650
+ cur_intervals = self.select_opti_intervals(all_intervals=all_intervals, stationOut=stationOut, filter_nan=True)
3651
+ flood_intervals = (date(2021, 7, 13, 0, 0, 0, tzinfo=datetime.timezone.utc),
3652
+ date(2021, 7, 17, 0, 0, 0, tzinfo=datetime.timezone.utc))
3653
+ phys_prop_intervals = self._intersect_intervals(cur_intervals, flood_intervals)
3654
+ self.save_opti_dates_to_file(cur_intervals)
3655
+ # Rename the result file
3656
+ self.optiParam.change_param("Optimizer", "fname", stationOut)
3657
+ self.optiParam.SavetoFile(None)
3658
+ self.optiParam.Reload(None)
3659
+ self.update_myParams(idLauncher)
3660
+ # Prepare the paramPy dictionnary before calibration
3661
+ self.prepare_calibration_timeDelay(stationOut=stationOut)
3662
+ # Reload the useful modules
3663
+ self.reload_hydro(idCompar=0, fromStation=stationOut, lastLevel=previousLevel, updateAll=True)
3664
+ ## =======
3665
+ ## Init
3666
+ ## =======
3667
+ self.init_optimizer(idOpti)
3668
+ self.associate_ptr(None, idOpti=idOpti)
3669
+ # Get the best parameters to test
3670
+ all_params = self.get_best_params(stationOut=stationOut, quantile=quantile_Nash, std=std_Nash, rmv_near_max=1e-4, apply_clustering=clustering_Nash)
3671
+ ## =======
3672
+ ## Compute
3673
+ ## =======
3674
+ all_frac = []
3675
+ ids = None
3676
+ iv_matrix = None
3677
+ # Check if the excel file already exists and load it to check if some parameters have already been tested
3678
+ if restart_from_file:
3679
+ all_frac, all_params = self._reload_model_analysis(stationOut=stationOut, all_params=all_params)
3680
+ # Get param names
3681
+ names = self.get_param_names(idLauncher=idLauncher, stationOut=stationOut)
3682
+ logging.info("The number of sets of parameters to test are : "+str(len(all_params)))
3683
+ for i in tqdm(range(len(all_params))):
3684
+ cur_p = all_params[i, :-1]
3685
+ cur_obj = all_params[i, -1]
3686
+ cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
3687
+ logging.info(_("cur_obj : %s ; cur_obj2 : %s"), cur_obj, cur_obj2)
3688
+ if cur_obj != cur_obj2:
3689
+ logging.error(_("The objective function is not the same as the one computed by the model!"))
3690
+ logging.error(_("cur_obj : %s ; cur_obj2 : %s"), cur_obj, cur_obj2)
3691
+ # Recover the full matrix from Fortran
3692
+ if ids is None or iv_matrix is None:
3693
+ iv_data = None
3694
+ else:
3695
+ iv_data = (ids, iv_matrix)
3696
+ ids, iv_matrix = self.get_all_activated_iv(idOpti=idOpti, idLauncher=idLauncher, iv_variables=iv_data)
3697
+ # Save all the variables/evaluations desired
3698
+ frac_flow_dict = self._get_flow_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=phys_prop_intervals,
3699
+ from_full_matrix=(ids, iv_matrix))
3700
+ max_flow_dict = self._get_max_flow_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=phys_prop_intervals,
3701
+ from_full_matrix=(ids, iv_matrix))
3702
+ init_iv = self._get_punctual_reservoir_fractions(eval_date=cur_intervals[0][0], idLauncher=idLauncher, stationOut=stationOut,
3703
+ from_full_matrix=(ids, iv_matrix))
3704
+ p_excess = self._get_exceedance(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
3705
+ max_sim_obs = self._get_ratio_max_sim_obs(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
3706
+ # Extract the time delays
3707
+ all_timeDelays = curCatch.get_timeDelays_inlets(ref=stationOut)
3708
+ all_timeDelays_str = {key : str(datetime.timedelta(seconds=all_timeDelays[key])) for key in all_timeDelays}
3709
+ cur_timeDelays = list(all_timeDelays_str.values())
3710
+ # Concatenate all the informations
3711
+ cur_all_frac = (list(cur_p)
3712
+ + cur_timeDelays
3713
+ + list(frac_flow_dict.values())
3714
+ + list(max_flow_dict.values())
3715
+ + list(init_iv.values())
3716
+ + [p_excess, max_sim_obs, cur_obj])
3717
+ all_frac.append(cur_all_frac)
3718
+ # Periodically save the evaluations in case of trouble
3719
+ if (i + 1) % save_every == 0:
3720
+ # Save the evaluations
3721
+ var_names = names \
3722
+ + list(all_timeDelays_str.keys()) \
3723
+ + list(frac_flow_dict.keys()) \
3724
+ + list(max_flow_dict.keys()) \
3725
+ + list(init_iv.keys()) \
3726
+ + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
3727
+ cur_df = pd.DataFrame(all_frac, columns=var_names)
3728
+ # write first the tempory results for each station
3729
+ writer_stat = pd.ExcelWriter(self.workingDir / (stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
3730
+ cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
3731
+ writer_stat.sheets[stationOut].autofit()
3732
+ writer_stat.close()
3733
+
3734
+
3735
+ # Save the evaluations
3736
+ if(len(all_params))>0:
3737
+ var_names = names \
3738
+ + list(all_timeDelays_str.keys()) \
3739
+ + list(frac_flow_dict.keys()) \
3740
+ + list(max_flow_dict.keys()) \
3741
+ + list(init_iv.keys()) \
3742
+ + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
3743
+ cur_df = pd.DataFrame(all_frac, columns=var_names)
3744
+ # write first the tempory results for each station
3745
+ writer_stat = pd.ExcelWriter(self.workingDir / (stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
3746
+ cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
3747
+ writer_stat.sheets[stationOut].autofit()
3748
+ writer_stat.close()
3749
+ # write now the informations for all the stations in the same excel file
3750
+ cur_df.to_excel(writer_tot, sheet_name=stationOut, columns=var_names)
3751
+ writer_tot.sheets[stationOut].autofit()
3348
3752
 
3349
3753
  ## =======
3350
3754
  ## =======
@@ -3366,6 +3770,159 @@ class Optimisation(wx.Frame):
3366
3770
  writer_tot.close()
3367
3771
  logging.info("The equifinality test is finished!")
3368
3772
 
3773
+
3774
    # FIXME : this function has been dashed off -> functionnal but not well written!!
    # TODO : to improve !!!!!!
    def launch_models_properties_with_Nash(self, event, idLauncher:int=0, idOpti:int=1, quantile_Nash:float=0.01, std_Nash:float=0.03, clustering_Nash:bool=False,
                                           save_every:int=1000, restart_from_file:bool=False,
                                           evaluation_interval:tuple[datetime.datetime, datetime.datetime]=[]):
        """
        Analyse the properties of the model for every near-optimal parameter set
        and relate them to the Nash coefficient (equifinality analysis).

        For each compare station, the optimizer is re-initialised, each retained
        parameter set is re-evaluated and its flow fractions, reservoir states,
        probability of exceedance, peak ratio and time delays are written to a
        per-station Excel file and to a global "all_best_tests.xlsx" workbook.

        Args:
            event: wx menu event (not used in the body; kept for GUI binding).
            idLauncher (int, optional): The id of the launcher. Defaults to 0.
            idOpti (int, optional): The id of the optimizer. Defaults to 1.
            quantile_Nash (float, optional): NOT forwarded in this version —
                get_best_params is called with quantile=None (see below).
            std_Nash (float, optional): NOT forwarded in this version —
                get_best_params is called with std=0.1 (see below).
            clustering_Nash (bool, optional): apply clustering when selecting
                the best parameter sets. Defaults to False.
            save_every (int, optional): checkpoint the per-station Excel file
                every `save_every` tested sets. Defaults to 1000.
            restart_from_file (bool, optional): resume from an existing
                per-station Excel file, skipping already-tested sets.
            evaluation_interval (tuple, optional): window used for the physical
                properties; when empty, defaults to the July 2021 flood window.

        Returns:
            None

        Raises:
            Exception: any failure is logged, the optimisation dates file is
            restored, then the exception is re-raised.
        """
        curCatch:Catchment = self.myCases[idLauncher].refCatchment
        try:
            onlyOwnSub = self.optiParam.get_param("Semi-Distributed", "Own_SubBasin")
            if onlyOwnSub is None:
                onlyOwnSub = False
            doneList = []
            previousLevel = 1
            # Collect sort and save the compare stations
            self.set_compare_stations(idLauncher=idLauncher)
            sortJct = self.myStations
            # Get the initial number of intervals
            # -> these can evolve according to the measurement available at each station
            self.all_intervals = self._read_opti_intervals(idLauncher=idLauncher)
            all_intervals = self.all_intervals
            # Activate the writing of the internal variables
            curCatch.activate_all_internal_variables()
            # Prepare the Excel writer
            writer_tot = pd.ExcelWriter(self.workingDir / "all_best_tests.xlsx", engine = 'xlsxwriter')

            for iOpti in range(len(sortJct)):
                stationOut = sortJct[iOpti]
                logging.info("==================")
                logging.info("Station : "+stationOut)
                isOk, cur_intervals = self.prepare_optimize_model_F_one_station(stationOut=stationOut, idLauncher=idLauncher, idOpti=idOpti,
                                                                                all_intervals=all_intervals, already_done_subbasins=doneList,
                                                                                onlyOwnSub=onlyOwnSub, previousLevel=previousLevel,
                                                                                return_intervals=True)
                # Get the best parameters to test
                # NOTE(review): quantile_Nash and std_Nash arguments are ignored
                # here (hard-coded quantile=None, std=0.1) — confirm intent.
                all_params = self.get_best_params(stationOut=stationOut, quantile=None, std=0.1, rmv_near_max=None, apply_clustering=clustering_Nash)
                ## =======
                ## Compute loop
                ## =======
                # FIXME : if no list, just keep the cur_intervals and also no intervals intersection application
                if evaluation_interval == []:
                    evaluation_interval = (date(2021, 7, 13, 0, 0, 0, tzinfo=datetime.timezone.utc),
                                           date(2021, 7, 17, 0, 0, 0, tzinfo=datetime.timezone.utc))
                phys_prop_intervals = self._intersect_intervals(cur_intervals, evaluation_interval)
                all_frac = []
                ids = None
                iv_matrix = None
                # Check if the excel file already exists and load it to check if some parameters have already been tested
                if restart_from_file:
                    all_frac, all_params = self._reload_model_analysis(stationOut=stationOut, all_params=all_params)
                # Get param names
                names = self.get_param_names(idLauncher=idLauncher, stationOut=stationOut)
                logging.info("The number of sets of parameters to test are : "+str(len(all_params)))
                for i in tqdm(range(len(all_params))):
                    # Last column holds the objective function; the rest are parameters
                    cur_p = all_params[i, :-1]
                    cur_obj = all_params[i, -1]
                    # Re-evaluate to make sure the stored objective is reproducible
                    cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
                    logging.info(_("cur_obj : %s ; cur_obj2 : %s"), cur_obj, cur_obj2)
                    if cur_obj != cur_obj2:
                        logging.error(_("The objective function is not the same as the one computed by the model!"))
                        logging.error(_("cur_obj : %s ; cur_obj2 : %s"), cur_obj, cur_obj2)
                    # Recover the full matrix from Fortran
                    # (first iteration passes None so the matrix gets allocated once, then reused)
                    if ids is None or iv_matrix is None:
                        iv_data = None
                    else:
                        iv_data = (ids, iv_matrix)
                    ids, iv_matrix = self.get_all_activated_iv(idOpti=idOpti, idLauncher=idLauncher, iv_variables=iv_data)
                    # Save all the variables/evaluations desired
                    frac_flow_dict = self._get_flow_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=phys_prop_intervals,
                                                              from_full_matrix=(ids, iv_matrix))
                    max_flow_dict = self._get_max_flow_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=phys_prop_intervals,
                                                                 from_full_matrix=(ids, iv_matrix))
                    init_iv = self._get_punctual_reservoir_fractions(eval_date=cur_intervals[0][0], idLauncher=idLauncher, stationOut=stationOut,
                                                                     from_full_matrix=(ids, iv_matrix))
                    p_excess = self._get_exceedance(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
                    max_sim_obs = self._get_ratio_max_sim_obs(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
                    # Extract the time delays
                    all_timeDelays = curCatch.get_timeDelays_inlets(ref=stationOut)
                    all_timeDelays_str = {key : str(datetime.timedelta(seconds=all_timeDelays[key])) for key in all_timeDelays}
                    cur_timeDelays = list(all_timeDelays_str.values())
                    # Concatenate all the informations
                    cur_all_frac = (list(cur_p)
                                    + cur_timeDelays
                                    + list(frac_flow_dict.values())
                                    + list(max_flow_dict.values())
                                    + list(init_iv.values())
                                    + [p_excess, max_sim_obs, cur_obj])
                    all_frac.append(cur_all_frac)
                    # Periodically save the evaluations in case of trouble
                    if (i + 1) % save_every == 0:
                        # Save the evaluations
                        var_names = names \
                                    + list(all_timeDelays_str.keys()) \
                                    + list(frac_flow_dict.keys()) \
                                    + list(max_flow_dict.keys()) \
                                    + list(init_iv.keys()) \
                                    + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
                        cur_df = pd.DataFrame(all_frac, columns=var_names)
                        # write first the tempory results for each station
                        writer_stat = pd.ExcelWriter(self.workingDir / (stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
                        cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
                        writer_stat.sheets[stationOut].autofit()
                        writer_stat.close()


                # Save the evaluations
                if(len(all_params))>0:
                    var_names = names \
                                + list(all_timeDelays_str.keys()) \
                                + list(frac_flow_dict.keys()) \
                                + list(max_flow_dict.keys()) \
                                + list(init_iv.keys()) \
                                + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
                    cur_df = pd.DataFrame(all_frac, columns=var_names)
                    # write first the tempory results for each station
                    writer_stat = pd.ExcelWriter(self.workingDir / (stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
                    cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
                    writer_stat.sheets[stationOut].autofit()
                    writer_stat.close()
                    # write now the informations for all the stations in the same excel file
                    cur_df.to_excel(writer_tot, sheet_name=stationOut, columns=var_names)
                    writer_tot.sheets[stationOut].autofit()

                ## =======
                ## =======
                # Reset the configuration of the optimal parameters in all file and in all variables
                self.reload_optimal_subbasin(idLauncher=idLauncher, idOpti=idOpti, stationOut=stationOut)

                # All upstream elements of a reference will be fixed
                doneList.append(stationOut)
                previousLevel = curCatch.levelOut

            # Reset the optimisation file
            self.save_opti_dates_to_file(self.all_intervals)
            writer_tot.close()
        except Exception as e:
            logging.error("An error occurred during the model properties analysis: " + str(e))
            # Reset the optimisation file
            self.save_opti_dates_to_file(self.all_intervals)
            raise e
        logging.info("The equifinality test is finished!")
+
3369
3926
  # FIXME : it might be better to pass the myParams to the CaseOpti object instead to allow parallelisation
3370
3927
  def _build_type_to_key_index(self) -> dict[int, int]:
3371
3928
  return {param["type"]: i for i, param in self.myParams.items()}
@@ -3377,6 +3934,160 @@ class Optimisation(wx.Frame):
3377
3934
  def _get_key_from_type_parameter(self, type_param:int) -> int:
3378
3935
  return next((i for i, param in self.myParams.items() if param["type"] == type_param), None)
3379
3936
 
3937
+ def _intersect_intervals(self, intervals:list[tuple[date, date]], flood_intervals:tuple[date, date]):
3938
+ result = []
3939
+ flood_start, flood_end = flood_intervals
3940
+ for int_start, int_end in intervals:
3941
+ # Find overlap
3942
+ start = max(int_start, flood_start)
3943
+ end = min(int_end, flood_end)
3944
+ if start < end: # There is an overlap
3945
+ result.append((start, end))
3946
+ return result
3947
+
3948
    def reload_optimal_subbasin(self, idLauncher:int=0, idOpti:int=1, stationOut:str=None):
        """
        Reapply the optimal parameter set and rebuild the hydrographs.

        Re-reads the best parameters, reruns the distributed hydrological
        model with them, refreshes the hydrographs of all effective subbasins
        and reapplies the time delays so that the in-memory state matches the
        optimal configuration.

        Args:
            idLauncher (int, optional): The id of the launcher. Defaults to 0.
            idOpti (int, optional): The id of the optimizer. Defaults to 1.
            stationOut (str, optional): Junction key used as outlet for the
                time-delay redistribution.

        Returns:
            None

        Raises:
            None
        """
        # Get the current catchment
        curCatch:Catchment = self.myCases[idLauncher].refCatchment
        # Collect the best parameters and their objective function(s)
        # NOTE(review): apply_optim(None) presumably also writes them back to file — confirm
        best_params = self.apply_optim(None)
        # Simulation with the best parameters
        self.compute_distributed_hydro_model()
        # Update myHydro of all effective subbasins to get the best configuration upstream
        curCatch.read_hydro_eff_subBasin()
        # Update timeDelays according to time wolf_array
        self.apply_timeDelay_dist(idOpti=idOpti, idLauncher=idLauncher, junctionKey=stationOut)
        # Update the outflows
        curCatch.update_hydro(idCompar=0)
3975
+
3976
+
3977
+
3978
+ def init_optimize_model_f(self, idOpti:int=1):
3979
+ """
3980
+ Initialize the optimization model for a given idOpti.
3981
+
3982
+ Args:
3983
+ idOpti (int): The id of the optimizer.
3984
+
3985
+ Returns:
3986
+ None
3987
+
3988
+ Raises:
3989
+ None
3990
+ """
3991
+ # Initialize the optimizer
3992
+ self.init_optimizer(idOpti)
3993
+ # Associate the pointer to the optimizer
3994
+ self.associate_ptr(None, idOpti=idOpti)
3995
+
3996
+
3997
+ def prepare_optimize_model_F_one_station(self, idLauncher:int=0, idOpti:int=1, stationOut:str=None, all_intervals:list[tuple[date, date]]=[],
3998
+ onlyOwnSub:bool=True, previousLevel:int=1, already_done_subbasins:list[str]=[],
3999
+ return_intervals:bool=False):
4000
+ isOk = 0
4001
+ # Get the current catchment
4002
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
4003
+ # Define the outlet station and keep the same if not provided
4004
+ if stationOut is None:
4005
+ stationOut = curCatch.junctionOut
4006
+ # Select and save the compare file off the outlet station
4007
+ self.save_current_compare_file(stationOut=stationOut)
4008
+ # Save the name of the station that will be the output
4009
+ curCatch.define_station_out(stationOut)
4010
+ # Activate all the useful subs and write it in the param file
4011
+ curCatch.activate_usefulSubs(blockJunction=already_done_subbasins, onlyItself=True)
4012
+ # Select correct calibration intervals -> remove the intervals with NaN
4013
+ if all_intervals == []:
4014
+ all_intervals = self._read_opti_intervals(idLauncher=idLauncher)
4015
+ cur_intervals = self.select_opti_intervals(all_intervals=all_intervals, stationOut=stationOut, filter_nan=True)
4016
+ self.save_opti_dates_to_file(cur_intervals)
4017
+ # Rename the result file
4018
+ self.optiParam.change_param("Optimizer", "fname", stationOut)
4019
+ self.optiParam.SavetoFile(None)
4020
+ self.optiParam.Reload(None)
4021
+ self.update_myParams(idLauncher)
4022
+ # Prepare the paramPy dictionnary before calibration
4023
+ self.prepare_calibration_timeDelay(stationOut=stationOut)
4024
+ # Reload the useful modules
4025
+ self.reload_hydro(idCompar=0, fromStation=stationOut, lastLevel=previousLevel, updateAll=True)
4026
+ ## =======
4027
+ ## Init
4028
+ ## =======
4029
+ self.init_optimize_model_f(idOpti=idOpti)
4030
+
4031
+ if return_intervals:
4032
+ return isOk, cur_intervals
4033
+ else:
4034
+ return isOk
4035
+
4036
+
4037
    def extract_hydro_from_params(self, idLauncher:int=0, idOpti:int=1, stationOut:str=None,
                                  all_params:np.ndarray=None):
        """
        Extract the outflow hydrograph produced by each tested parameter set.

        Each row of *all_params* holds one parameter set followed by its
        objective value in the last column. Every set is re-evaluated through
        the optimizer and the resulting catchment outflow is stored; the
        optimal configuration is then restored and cross-checked.

        Args:
            idLauncher (int): The id of the launcher. Defaults to 0.
            idOpti (int): The id of the optimizer. Defaults to 1.
            stationOut (str): The name of the station.
            all_params (np.ndarray): Parameter sets to test (last column =
                objective). If None, the best sets are selected automatically.

        Returns:
            tuple[np.ndarray, np.ndarray]: (test_hydro, optimal_hydro) where
            test_hydro has one row per tested set and optimal_hydro is the
            outflow of the optimal configuration.

        Raises:
            None
        """
        if all_params is None:
            all_params = self.get_best_params(stationOut=stationOut, quantile=0.01, std=0.03, rmv_near_max=1e-4, apply_clustering=True)

        cur_catch = self.myCases[idLauncher].refCatchment
        # Keep the full interval list so it can be restored at the end
        self.all_intervals = self._read_opti_intervals(idLauncher=idLauncher)
        # Prepare the optimization model for one station
        self.prepare_optimize_model_F_one_station(idLauncher=idLauncher, idOpti=idOpti, stationOut=stationOut)
        # Init the matrix of hydrographs
        nb_tests = np.shape(all_params)[0]
        optimal_hydro = cur_catch.get_outflow()
        test_hydro = np.zeros((nb_tests, len(optimal_hydro)))

        # Evaluate the model with the parameters
        for i in tqdm(range(nb_tests)):
            # Last column is the stored objective; the rest are parameters
            cur_p = all_params[i, :-1]
            cur_obj = all_params[i, -1]
            # Re-evaluation should reproduce the stored objective exactly
            cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
            if cur_obj != cur_obj2:
                logging.error("The objective function is not the same as the one computed by the model!")
                logging.error("cur_obj : "+str(cur_obj)+" ; cur_obj2 : "+str(cur_obj2))
            test_hydro[i, :] = cur_catch.get_outflow()

        # Reset the configuration of the optimal parameters in all file and in all variables
        self.reload_optimal_subbasin(idLauncher=idLauncher, idOpti=idOpti, stationOut=stationOut)
        # Sanity check: restoring the optimum must reproduce the initial outflow
        optimal_hydro_2 = cur_catch.get_outflow()
        if not np.allclose(optimal_hydro, optimal_hydro_2):
            logging.error("The optimal hydrograph is not the same as the one computed by the model!")
            logging.error("optimal_hydro : "+str(optimal_hydro)+" ; optimal_hydro_2 : "+str(optimal_hydro_2))

        # Reset the optimisation file
        self.save_opti_dates_to_file(self.all_intervals)
        # One of the test_hydro should be the same as the optimal hydrograph
        # NOTE(review): dead experimental check left in place intentionally — candidate for removal
        # if np.min(test_hydro-optimal_hydro, axis=1)<1e-3:
        #     return test_hydro, optimal_hydro
        # if not np.allclose(test_hydro, optimal_hydro):

        return test_hydro, optimal_hydro
3380
4091
 
3381
4092
  def make_nd_array(self, c_pointer, shape, dtype=np.float64, order='C', own_data=True,readonly=False):
3382
4093
  arr_size = np.prod(shape[:]) * np.dtype(dtype).itemsize
@@ -3395,7 +4106,7 @@ class Optimisation(wx.Frame):
3395
4106
  else:
3396
4107
  return arr
3397
4108
 
3398
- def _reload_model_analysis(self, stationOut:str, all_params:np.ndarray):
4109
+ def _reload_model_analysis(self, stationOut:str, all_params:np.ndarray)-> tuple[list, np.ndarray]:
3399
4110
  """
3400
4111
  Reload the model analysis for a given station.
3401
4112
 
@@ -3410,11 +4121,11 @@ class Optimisation(wx.Frame):
3410
4121
  None
3411
4122
  """
3412
4123
  # Check if the excel file already exists and load it to check if some parameters have already been tested
3413
- filename = os.path.join(self.workingDir, stationOut+"_tests.xlsx")
4124
+ filename = self.workingDir / (stationOut+"_tests.xlsx")
3414
4125
  # just_params = all_params[:, :-1]
3415
4126
  nb_params = np.shape(all_params)[1] - 1
3416
4127
  if os.path.isfile(filename):
3417
- df = pd.read_excel(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), sheet_name=stationOut)
4128
+ df = pd.read_excel(self.workingDir / (stationOut+"_tests.xlsx"), sheet_name=stationOut)
3418
4129
  # Extract all the values of the dataframe in a list
3419
4130
  all_data_tested = df.iloc[:, 1:].values.tolist()
3420
4131
  # Extract all the values of the dataframe in a numpy array
@@ -3423,4 +4134,4 @@ class Optimisation(wx.Frame):
3423
4134
  new_params = np.array([el for el in all_params if ~np.any(np.all(np.isclose(all_params_tested, el[:-1], atol=1e-6), axis=1))])
3424
4135
  return all_data_tested, new_params
3425
4136
 
3426
- return [], all_params
4137
+ return [], all_params