wolfhece 2.0.43__py3-none-any.whl → 2.0.45__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,6 +18,7 @@ from ..PyGui import GenMapManager,HydrologyModel
  from . import cst_exchanges as cste
  from . import constant as cst
  from ..PyTranslate import _
+ import traceback
 
 
  # %% Constants
@@ -37,6 +38,7 @@ class CaseOpti(GenMapManager):
  refCatchment:Catchment
  idToolItem:int
  mydro:HydrologyModel
+ idMenuItem:int
 
  # FIXME : this variable is just there before the seperation between the object optimisation and GUI optimisation
  wx_exists:bool
@@ -111,8 +113,10 @@ class Optimisation(wx.Frame):
 
  myCases:list[CaseOpti]
 
- myStations:list
- compareFilesDict:dict
+ myStations:list[str]
+ compareFilesDict:dict[str, str]
+ compareSubBasins:dict[str, SubBasin]
+ all_intervals:list[tuple[datetime.datetime, datetime.datetime]]
 
  # FIXME : this variable is just there before the seperation between the object optimisation and GUI optimisation
  wx_exists:bool
@@ -140,6 +144,7 @@ class Optimisation(wx.Frame):
 
  self.myStations = []
  self.compareFilesDict = {}
+ self.all_intervals = None
 
  self.curParams_vec_F = None
 
@@ -201,7 +206,15 @@ class Optimisation(wx.Frame):
  self.Bind(wx.EVT_MENU, self.plot_all_landuses, landuseClick)
  landuseHydroClick = toolMenu.Append(wx.ID_ANY, 'Plot all hydro landuses')
  self.Bind(wx.EVT_MENU, self.plot_all_landuses_hydro, landuseHydroClick)
-
+ internValClick = toolMenu.Append(wx.ID_ANY, 'Extract internal variables')
+ self.Bind(wx.EVT_MENU, self.extract_internal_variables, internValClick)
+ plotParetoClick = toolMenu.Append(wx.ID_ANY, 'Plot Nash vs Qexcess')
+ self.Bind(wx.EVT_MENU, self.plot_Nash_vs_Qexcess, plotParetoClick)
+ testEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Test equifinality with Nash')
+ self.Bind(wx.EVT_MENU, self.test_equifinality_with_Nash, testEquiFinClick)
+ plotEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Plot equifinality with Nash')
+ self.Bind(wx.EVT_MENU, self.plot_equifinality, plotEquiFinClick)
+
 
  # Creation of the Lauch Menu
  launchMenu = wx.Menu()
@@ -356,7 +369,7 @@ class Optimisation(wx.Frame):
  if nbcases>1:
  wx.MessageBox(_('So far, there can only have 1 case! This will change soon.'), _('Error'), wx.OK|wx.ICON_ERROR)
  return
- # self.launcherDir = []
+ self.launcherDir = []
  for i in range(nbcases):
  newCase = CaseOpti()
  launcherDir = self.optiParam.get_param("Cases","dir_"+str(i+1))
@@ -365,35 +378,35 @@
  print("ERROR : in path of launcherDir")
  newCase.read_param(launcherDir, copyDefault=False, callback=self.update_parameters_launcher)
  # FIXME TO CHANGE when seperation with the GUI
- if self.wx_exists:
- newId = wx.Window.NewControlId()
- iMenu = self.MenuBar.FindMenu('Param files')
- paramMenu = self.MenuBar.Menus[iMenu][0]
- curName = 'Case '+str(i+1)
- iItem = self.MenuBar.FindMenuItem('Param files', curName)
- if(iItem==wx.NOT_FOUND):
- caseMenu = wx.Menu()
- paramCaseFile = caseMenu.Append(wx.ID_ANY, 'launcher.param')
- self.Bind(wx.EVT_MENU, newCase.show_launcherParam, paramCaseFile)
- guiHydroCase = caseMenu.Append(wx.ID_ANY, 'GUI Hydro')
- refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
- isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
- if isOk<0:
- print("ERROR : in path of launcherDir")
- newCase.mydro = HydrologyModel(dir=refDir)
- newCase.mydro.Hide()
- self.Bind(wx.EVT_MENU, newCase.show_mydro, guiHydroCase)
- curCase = paramMenu.Append(newId, curName, caseMenu)
- else:
- print("WARNING : this scenario was not implemented yet. This might induce an error!")
- # iItem =
- curCase = paramMenu.Replace(iItem)
- else:
- refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
- isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
- newCase.mydro = HydrologyModel(dir=refDir)
+ # if self.wx_exists:
+ # newId = wx.Window.NewControlId()
+ # iMenu = self.MenuBar.FindMenu('Param files')
+ # paramMenu = self.MenuBar.Menus[iMenu][0]
+ # curName = 'Case '+str(i+1)
+ # iItem = self.MenuBar.FindMenuItem('Param files', curName)
+ # if(iItem==wx.NOT_FOUND):
+ # caseMenu = wx.Menu()
+ # paramCaseFile = caseMenu.Append(wx.ID_ANY, 'launcher.param')
+ # self.Bind(wx.EVT_MENU, newCase.show_launcherParam, paramCaseFile)
+ # guiHydroCase = caseMenu.Append(wx.ID_ANY, 'GUI Hydro')
+ # refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
+ # isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
+ # if isOk<0:
+ # print("ERROR : in path of launcherDir")
+ # newCase.mydro = HydrologyModel(dir=refDir)
+ # newCase.mydro.Hide()
+ # self.Bind(wx.EVT_MENU, newCase.show_mydro, guiHydroCase)
+ # curCase = paramMenu.Append(newId, curName, caseMenu)
+ # else:
+ # print("WARNING : this scenario was not implemented yet. This might induce an error!")
+ # # iItem =
+ # curCase = paramMenu.Replace(iItem)
+ # else:
+ # refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
+ # isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
+ # newCase.mydro = HydrologyModel(dir=refDir)
  # self.Bind(wx.EVT_MENU, newCase.show_launcherParam, curCase)
- newCase.idMenuItem = newId
+ # newCase.idMenuItem = newId
  self.myCases.append(newCase)
 
 
@@ -417,7 +430,8 @@ class Optimisation(wx.Frame):
  self.enable_MenuBar("Debug")
 
 
- def apply_optim(self, event, idLauncher:int=0, replace_only_if_better:bool=False, optim_params:np.ndarray=None):
+ def apply_optim(self, event, idLauncher:int=0,
+ replace_only_if_better:bool=False, optim_params:np.ndarray=None):
  """
  Apply optimal parameters based on the results file of the optimisation : ".rpt".
 
@@ -575,7 +589,7 @@ class Optimisation(wx.Frame):
  curCatch:Catchment
  self.nbParams = int(self.myCases[idLauncher].launcherParam.get_param("Paramètres à varier", "Nombre de paramètres à varier"))
  curCatch = self.myCases[idLauncher].refCatchment
-
+ launcher_param = self.myCases[idLauncher].launcherParam
 
  for i in range(1,self.nbParams+1):
  curParam = "param_" + str(i)
@@ -585,7 +599,12 @@ class Optimisation(wx.Frame):
  # Check cst_echange.py for the values (only consider the param of the Froude model)
  if self.myParams[i]["type"]>100 and self.myParams[i]["type"]<106:
  self.myParams[i]["update"] = curCatch.update_timeDelays_from_F
- self.myParams[i]["junction_name"] = curCatch.junctionOut
+ sorted_id = int(launcher_param.get_param(curParam, "Subbasin id", default_value=0))
+ if sorted_id == 0:
+ self.myParams[i]["junction_name"] = curCatch.junctionOut
+ else:
+ cur_id = list(curCatch.dictIdConversion.keys())[list(curCatch.dictIdConversion.values()).index(sorted_id)]
+ self.myParams[i]["junction_name"] = curCatch.subBasinDict[cur_id].name
 
  else:
  self.myParams[i]["update"] = self.update_nothing
@@ -616,7 +635,7 @@ class Optimisation(wx.Frame):
  if isOk>0:
  optimFile = optimFileBin
  allParams = read_bin(self.workingDir, nameTMP+".rpt.dat", uniform_format=8)
- matrixData = np.array(allParams[-1]).astype("float")
+ matrixData = np.array(allParams[-1]).astype("double")
  else:
  isOk, optimFileTxt = check_path(optimFileTxt)
  if isOk>0:
@@ -629,7 +648,7 @@ class Optimisation(wx.Frame):
  if(len(raw)>1):
  if raw[0]+" "+raw[1]=="Best run":
  list_data.append(raw[3:-1])
- matrixData = np.array(list_data[0]).astype("float")
+ matrixData = np.array(list_data[0]).astype("double")
  except:
  wx.MessageBox(_('The best parameters file is not found!'), _('Error'), wx.OK|wx.ICON_ERROR)
 
@@ -833,16 +852,8 @@ class Optimisation(wx.Frame):
  compMeas = []
  if self.myStations==[]:
  self.set_compare_stations(idLauncher=idLauncher)
-
- for iOpti in range(len(self.myStations)):
- dateBegin = refCatch.dateBegin
- dateEnd = refCatch.dateEnd
- deltaT = refCatch.deltaT # [sec]
- stationOut = self.myStations[iOpti]
- compareFileName = self.compareFilesDict[stationOut]
- dir_Meas = self.workingDir
- compMeas.append(SubBasin(dateBegin, dateEnd, deltaT, cst.compare_opti, dir_Meas))
- _,compMeas[iOpti].myHydro = compMeas[iOpti].get_hydro(1, workingDir=dir_Meas, fileNames=compareFileName)
+
+ compMeas = list(self.compareSubBasins.values())
 
  # Construction of the wx window for plot
  figure = Figure(figsize=(5, 4), dpi=100)
@@ -925,6 +936,42 @@ class Optimisation(wx.Frame):
  print("ERROR: in the Fotran routine in the optimizer computation!")
 
 
+ def evaluate_model_optimizer(self, parameters:np.array, idOpti:int=1):
+
+ self.dllFortran.evaluate_model_optimizer_py.restype = ct.c_double
+ self.dllFortran.evaluate_model_optimizer_py.argtypes = [ct.POINTER(ct.c_int),
+ ct.POINTER(ct.c_int),
+ ct.POINTER(ct.c_double)]
+
+ dims = np.array([len(parameters)], dtype=ct.c_int, order='F')
+ p = np.array(parameters, dtype=ct.c_double, order='F')
+
+ pointerDims = dims.ctypes.data_as(ct.POINTER(ct.c_int))
+ pointer_p = p.ctypes.data_as(ct.POINTER(ct.c_double))
+
+ print("Launch a Fortran procedure")
+ obj_fct = self.dllFortran.evaluate_model_optimizer_py(ct.byref(ct.c_int(idOpti)),
+ pointerDims,
+ pointer_p)
+ print("End of Fortran procedure")
+
+ return obj_fct
+
+
+ def write_mesh_results_optimizer(self, idOpti:int=1):
+
+ self.dllFortran.write_mesh_results_optimizer_py.restype = ct.c_int
+ self.dllFortran.write_mesh_results_optimizer_py.argtypes = [ct.POINTER(ct.c_int)]
+
+
+ print("Launch a Fortran procedure")
+ isOk = self.dllFortran.write_mesh_results_optimizer_py(ct.byref(ct.c_int(idOpti)))
+ print("End of Fortran procedure")
+
+ if isOk!=0:
+ print("ERROR: in the Fotran routine in the optimizer computation!")
+
+
  def init_optimizer(self, idForced=-1):
 
  pathPtr = self.workingDir.encode('ansi')
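Aside (illustrative, not part of the wolfhece sources): the two wrappers above use the usual NumPy-to-ctypes calling pattern, declaring the routine's restype/argtypes once and handing raw pointers to NumPy-owned buffers. A minimal standalone sketch, assuming dll is an already-loaded ctypes.CDLL exposing the same evaluate_model_optimizer_py symbol:

    import ctypes as ct
    import numpy as np

    def call_evaluate(dll: ct.CDLL, id_opti: int, params: np.ndarray) -> float:
        # Declare the signature: returns a double, takes pointers to an int
        # (optimizer id), an int array (dimensions) and a double array (parameters).
        fct = dll.evaluate_model_optimizer_py
        fct.restype = ct.c_double
        fct.argtypes = [ct.POINTER(ct.c_int), ct.POINTER(ct.c_int), ct.POINTER(ct.c_double)]

        dims = np.array([len(params)], dtype=ct.c_int, order='F')
        p = np.asarray(params, dtype=ct.c_double, order='F')

        # NumPy owns the memory; ctypes only passes raw pointers to it.
        return fct(ct.byref(ct.c_int(id_opti)),
                   dims.ctypes.data_as(ct.POINTER(ct.c_int)),
                   p.ctypes.data_as(ct.POINTER(ct.c_double)))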
@@ -1227,48 +1274,48 @@ class Optimisation(wx.Frame):
  """
  curCatch:Catchment = self.myCases[idLauncher].refCatchment
 
- if (self.optiParam.get_group("Semi-Distributed"))is not None:
+ # if (self.optiParam.get_group("Semi-Distributed"))is not None:
+ try:
  nbRefs = self.optiParam.get_param("Semi-Distributed","nb")
  onlyOwnSub = self.optiParam.get_param("Semi-Distributed", "Own_SubBasin")
  if onlyOwnSub is None:
  onlyOwnSub = False
  doneList = []
- sortJct = []
- readDict = {}
  previousLevel = 1
- # Read all ref data
- for iRef in range(1, nbRefs+1):
- stationOut = self.optiParam.get_param("Semi-Distributed","Station measures "+str(iRef))
- compareFileName = self.optiParam.get_param("Semi-Distributed","File reference "+str(iRef))
- readDict[stationOut] = compareFileName
- self.compareFilesDict = readDict
+ # Collect sort and save the compare stations
+ self.set_compare_stations(idLauncher=idLauncher)
+ sortJct = self.myStations
+ readDict = self.compareFilesDict
  # Get the initial number of intervals
  # -> these can evolve according to the measurement available at each station
- # FIXME : finish to generalise this part
+ is_ok = self._save_opti_intervals()
+ if is_ok<0:
+ logging.error("Problem in optimisation intervals! Optimisation abort !")
+ return
+ all_intervals = self.all_intervals
+ simul_intervals = curCatch.simulation_intervals
+ # FIXME : to potentially remove
  nb_comparisons = self.comparHowParam.get_param("Comparison global characteristics","nb")
- nb_intervals_init = [self.comparHowParam.get_param(" ".join(["Comparison",str(i)]),"nb intervals") for i in range(1,nb_comparisons+1)]
+ nb_intervals_init = len(self.all_intervals)
  # Get the number of attempts with random initial conditions and from the best parameters for each station
  # The total number of iterations per station is the product of these two numbers :
  # nb_iter total = nb_iter_from_random * nb_iter_from_best
  nb_iter_from_random = self.optiParam.get_param("Optimizer","nb iter from random initial conditions",default_value=1)
  nb_iter_from_best = self.optiParam.get_param("Optimizer","nb iter from best",default_value=1)
- # Check the initial parameters and if they are forced
- init_params = self.get_initial_parameters()
- # Sort all the junctions by level
- sortJct = curCatch.sort_level_given_junctions(list(readDict.keys()), changeNames=False)
- self.myStations = sortJct
 
  for iOpti in range(len(sortJct)):
  stationOut = sortJct[iOpti]
- compareFileName = readDict[stationOut]
- # Copy the correct compare.txt file
- shutil.copyfile(os.path.join(self.workingDir,compareFileName), os.path.join(self.workingDir,"compare.txt"))
+ # Build the current compare.txt file and replace all nan values by 0.0
+ self.save_current_compare_file(stationOut=stationOut)
  # Save the name of the station that will be the output
  curCatch.define_station_out(stationOut)
  # Activate all the useful subs and write it in the param file
  curCatch.activate_usefulSubs(blockJunction=doneList, onlyItself=onlyOwnSub)
- # # Select correct calibration intervals
- # self.select_opti_intervals(stationOut)
+ # Select correct calibration intervals -> remove the intervals with NaN
+ cur_intervals = self.select_opti_intervals(all_intervals=all_intervals, stationOut=stationOut, filter_nan=True)
+ self.save_opti_dates_to_file(cur_intervals)
+ is_ok = self._save_opti_intervals(stationOut=stationOut, intervals=cur_intervals)
+
  # Rename the result file
  self.optiParam.change_param("Optimizer", "fname", stationOut)
  self.optiParam.SavetoFile(None)
@@ -1276,6 +1323,11 @@
  self.update_myParams(idLauncher)
  # Prepare the paramPy dictionnary before calibration
  self.prepare_calibration_timeDelay(stationOut=stationOut)
+ # Prepare the potential discontinuous simulation
+ # FIXME : to potentially uncomment or removed : probably remove because we want to generate the complete event simulations to progress in the optimisation
+ # self.prepare_simulation(opti_intervals=cur_intervals, idLauncher=idLauncher)
+ # Check the initial parameters and if they are forced
+ init_params = self.get_initial_parameters()
  ## loop on the number of different optimisation attempt we would like for each station
  best_params_overall = None
  cur_i = 0
@@ -1309,6 +1361,8 @@
  cur_i += 1
  # Apply the best parameters overall attemps
  self.apply_optim(None,optim_params=best_params_overall)
+ # Reset the init parameters
+ self.reset_init_params(init_params)
  # copy the optimisation results to save it on the disk
  shutil.copyfile(os.path.join(self.workingDir, stationOut+"_"+str(i_best_overal+1)+".rpt.dat"),
  os.path.join(self.workingDir, stationOut+".rpt.dat"))
@@ -1318,16 +1372,36 @@
 
  # Simulation with the best parameters
  self.compute_distributed_hydro_model()
+ cur_p = best_params_overall[:-1]
+ cur_obj = best_params_overall[-1]
+ cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
+ print("cur_obj : ", cur_obj, " ; cur_obj2 : ", cur_obj2)
+ if cur_obj != cur_obj2:
+ logging.error("The objective function is not the same as the one computed")
  # Update myHydro of all effective subbasins to get the best configuration upstream
  curCatch.read_hydro_eff_subBasin()
  # Update timeDelays according to time wolf_array
  self.apply_timeDelay_dist(idOpti=idOpti, idLauncher=idLauncher, junctionKey=stationOut)
  # Update the outflows
  curCatch.update_hydro(idCompar=0)
+ # reset the simulation intervals to their initial values
+ # FIXME : to potentially uncomment or removed : probably remove because we want to generate the complete event simulations to progress in the optimisation
+ # self.reset_simulation_intervals(simul_intervals, idLauncher=idLauncher)
  # All upstream elements of a reference will be fixed
  doneList.append(stationOut)
  previousLevel = curCatch.levelOut
 
+ # Reset the optimisation file
+ self.save_opti_dates_to_file(self.all_intervals)
+ except:
+ print(traceback.format_exc())
+ logging.error("A problem occured ! Semi-distributed optimisation abort !")
+ # Reset the optimisation file
+ self.save_opti_dates_to_file(self.all_intervals)
+ # reset the simulation intervals to their initial values
+ # FIXME : to potentially uncomment or removed : probably remove because we want to generate the complete event simulations to progress in the optimisation
+ self.reset_simulation_intervals(simul_intervals, idLauncher=idLauncher)
+
  # Possibility to use the optimisation results enabled
  self.enable_MenuBar("Tools")
 
@@ -1606,13 +1680,21 @@
  curCatch:Catchment
 
  curCatch = self.myCases[idLauncher].refCatchment
+ launcher_param = self.myCases[idLauncher].launcherParam
+
  for i in range(1,self.nbParams+1):
  curParam = "param_" + str(i)
- self.myParams[i]["junction_name"] = curCatch.junctionOut
+
+ sorted_id = int(launcher_param.get_param(curParam, "Subbasin id", default_value=0))
+ if sorted_id == 0:
+ self.myParams[i]["junction_name"] = curCatch.junctionOut
+ else:
+ cur_id = list(curCatch.dictIdConversion.keys())[list(curCatch.dictIdConversion.values()).index(sorted_id)]
+ self.myParams[i]["junction_name"] = curCatch.subBasinDict[cur_id].name
 
 
 
- ## Function to determine the
+ ## Function to determine the compare stations, compare files and the compare station SubBasin objects for each station
  def set_compare_stations(self, idLauncher):
 
  if (self.optiParam.get_group("Semi-Distributed"))!=None:
@@ -1628,6 +1710,19 @@
  self.compareFilesDict = readDict
  # Sort all the junctions by level
  self.myStations = refCatch.sort_level_given_junctions(list(readDict.keys()), changeNames=False)
+ # Prepare the SubBasin compare objects for each station.
+ self.compareSubBasins = {stationOut: SubBasin(name=stationOut, _model=cst.compare_opti, _workingDir=self.workingDir)
+ for stationOut in self.myStations}
+ # This loop read all the measure and init the hydro surface of each SubBasin element
+ for key, cur_obj in self.compareSubBasins.items():
+ tmp, cur_obj.myHydro = cur_obj.get_hydro(1, workingDir=self.workingDir, fileNames=readDict[key])
+ keyBasin = refCatch.get_key_catchmentDict(key)
+ cur_basin = refCatch.catchmentDict[keyBasin]
+ cur_obj.surfaceDrained = cur_basin.surfaceDrainedHydro
+ cur_obj.surfaceDrainedHydro = cur_basin.surfaceDrainedHydro
+ cur_obj.compute_hydro()
+ # FIXME : generalise this verification or allow the measurements to adapt or build themselves correctly !!!
+ # assert cur_obj.dateBegin==refCatch.dateBegin and cur_obj.dateEnd==refCatch.dateEnd, "The measures and simulations does not have compatible intervals!"
 
 
  def destroyOpti(self, event):
@@ -1774,43 +1869,69 @@ class Optimisation(wx.Frame):
1774
1869
  doneList.append(stationOut)
1775
1870
 
1776
1871
 
1777
- def read_all_attempts_SA(self, format="rpt"):
1872
+ def read_all_attempts_SA(self, format="rpt", all_attempts=False, filter_repetitions=True):
1873
+
1778
1874
  nameTMP = self.optiParam.get_param("Optimizer","fname")
1875
+ if all_attempts:
1876
+ nb_iter_from_random = self.optiParam.get_param("Optimizer","nb iter from random initial conditions",
1877
+ default_value=1)
1878
+ nb_iter_from_best = self.optiParam.get_param("Optimizer","nb iter from best",
1879
+ default_value=1)
1880
+ nb_attempts = nb_iter_from_random * nb_iter_from_best
1881
+ all_names = [nameTMP+"_"+str(i+1) for i in range(nb_attempts)]
1882
+ else:
1883
+ all_names = [nameTMP]
1884
+
1885
+ matrixParam = np.empty((0, self.nbParams), dtype="double")
1886
+ vectorObjFct = np.empty((0,), dtype="double")
1779
1887
 
1780
1888
  if format=="rpt":
1781
- optimFile = os.path.join(self.workingDir, nameTMP+".rpt")
1889
+ for cur_file in all_names:
1890
+ optimFile = os.path.join(self.workingDir, cur_file+".rpt")
1782
1891
 
1783
- try:
1784
- with open(optimFile, newline = '') as fileID:
1785
- data_reader = csv.reader(fileID, delimiter='|',skipinitialspace=True, )
1786
- list_param = []
1787
- list_ObjFct = []
1788
- line = 0
1789
- for raw in data_reader:
1790
- if(line<3):
1892
+ try:
1893
+ with open(optimFile, newline = '') as fileID:
1894
+ data_reader = csv.reader(fileID, delimiter='|',skipinitialspace=True, )
1895
+ list_param = []
1896
+ list_ObjFct = []
1897
+ line = 0
1898
+ for raw in data_reader:
1899
+ if(line<3):
1900
+ line += 1
1901
+ continue
1902
+ if(len(raw)<=1):
1903
+ break
1904
+ else:
1905
+ usefulData = raw[2:-2]
1906
+ list_param.append(usefulData)
1907
+ list_ObjFct.append(raw[-2])
1791
1908
  line += 1
1792
- continue
1793
- if(len(raw)<=1):
1794
- break
1795
- else:
1796
- usefulData = raw[2:-2]
1797
- list_param.append(usefulData)
1798
- list_ObjFct.append(raw[-2])
1799
- line += 1
1800
- matrixParam = np.array(list_param).astype("float")
1801
- vectorObjFct = np.array(list_ObjFct).astype("float")
1802
- except:
1803
- wx.MessageBox(_('The best parameters file is not found!'), _('Error'), wx.OK|wx.ICON_ERROR)
1909
+ matrixParam = np.vstack((matrixParam,
1910
+ np.array(list_param).astype("double")))
1911
+ vectorObjFct = np.append(vectorObjFct,
1912
+ np.array(list_ObjFct).astype("double"))
1913
+ except:
1914
+ wx.MessageBox(_('The best parameters file is not found!'), _('Error'), wx.OK|wx.ICON_ERROR)
1804
1915
 
1805
1916
  elif format==".dat":
1806
- optimFile = os.path.join(self.workingDir, nameTMP+".rpt.dat")
1807
- isOk, optimFile = check_path(optimFile)
1808
- if isOk>0:
1809
- allData = read_bin(self.workingDir, nameTMP+".rpt.dat", uniform_format=8)
1810
- allData = np.array(allData).astype("float")
1811
- matrixParam = allData[:-1,:-1]
1812
- vectorObjFct = allData[:-1,-1]
1813
-
1917
+ for cur_file in all_names:
1918
+ optimFile = os.path.join(self.workingDir, cur_file+".rpt.dat")
1919
+ isOk, optimFile = check_path(optimFile)
1920
+ if isOk>0:
1921
+ allData = read_bin(self.workingDir, cur_file+".rpt.dat", uniform_format=8)
1922
+ allData = np.array(allData).astype("double")
1923
+ matrixParam = np.vstack((matrixParam, allData[:-1,:-1]))
1924
+ vectorObjFct = np.append(vectorObjFct, allData[:-1,-1])
1925
+
1926
+ if filter_repetitions:
1927
+ logging.info("Filtering the repetitions in the attempts!")
1928
+ filter_matrix, indices, inverse, counts = np.unique(matrixParam, axis=0,
1929
+ return_index=True,
1930
+ return_inverse=True,
1931
+ return_counts=True)
1932
+ vectorObjFct = vectorObjFct[indices]
1933
+ matrixParam = filter_matrix
1934
+ logging.info("The max number of repetitions = "+ str(np.max(counts)))
1814
1935
 
1815
1936
  return matrixParam, vectorObjFct
1816
1937
 
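Aside (illustrative, not part of the wolfhece sources): the filter_repetitions branch added to read_all_attempts_SA above de-duplicates parameter rows with np.unique over axis 0 while keeping the matching objective values. A minimal sketch of that idea, with hypothetical variable names:

    import numpy as np

    def drop_repeated_attempts(params: np.ndarray, obj_fct: np.ndarray):
        # Keep one copy of every distinct parameter row and the objective value
        # of the kept row (same idea as the filter_repetitions branch).
        unique_rows, first_idx, inverse, counts = np.unique(
            params, axis=0, return_index=True, return_inverse=True, return_counts=True)
        return unique_rows, obj_fct[first_idx], counts.max()

    # Example: two identical attempts collapse to one.
    p = np.array([[1.0, 2.0], [1.0, 2.0], [3.0, 4.0]])
    nash = np.array([0.71, 0.71, 0.65])
    rows, nash_kept, max_rep = drop_repeated_attempts(p, nash)   # 2 rows, max_rep == 2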
@@ -1819,36 +1940,46 @@ class Optimisation(wx.Frame):
1819
1940
 
1820
1941
  refCatch:Catchment = self.myCases[idLauncher].refCatchment
1821
1942
  myModel = refCatch.myModel
1822
- filePath = os.path.join(refCatch.workingDir, "Subbasin_" + str(refCatch.myEffSortSubBasins[0]) + "\\")
1823
-
1824
- myModelDict = cste.modelParamsDict[myModel]["Parameters"]
1825
1943
 
1826
1944
  if self.curParams_vec_F is None \
1827
- or len(self.curParams_vec_F) != self.nbParams:
1945
+ or len(self.curParams_vec_F) != self.nbParams:
1946
+
1828
1947
  self.curParams_vec_F = np.empty((self.nbParams,), dtype=ct.c_double, order='F')
1948
+
1949
+ myModelDict = cste.modelParamsDict[myModel]["Parameters"]
1950
+
1951
+ for cur_effsub in range(len(refCatch.myEffSubBasins)):
1829
1952
 
1830
- for i in range(self.nbParams):
1831
- myType = self.myParams[i+1]["type"]
1832
- if(int(myType)>0):
1833
- self.myParams[i+1]["value"] = params[i]
1834
- fileName = myModelDict[int(myType)]["File"]
1835
- myGroup = myModelDict[int(myType)]["Group"]
1836
- myKey = myModelDict[int(myType)]["Key"]
1837
- if "Convertion Factor" in myModelDict[int(myType)]:
1838
- convFact = myModelDict[int(myType)]["Convertion Factor"]
1953
+ filePath = os.path.join(refCatch.workingDir, "Subbasin_" + str(refCatch.myEffSortSubBasins[cur_effsub]))
1954
+
1955
+ for i in range(self.nbParams):
1956
+ myType = self.myParams[i+1]["type"]
1957
+ if(int(myType)>0):
1958
+ # If the parameter is not for the current effective subbasin
1959
+ # then we skip it
1960
+ if "junction_name" in self.myParams[i+1]:
1961
+ cur_sub = refCatch.catchmentDict[refCatch.get_key_catchmentDict(self.myParams[i+1]["junction_name"])]
1962
+ if cur_sub.iDSorted != refCatch.myEffSortSubBasins[cur_effsub]:
1963
+ continue
1964
+ self.myParams[i+1]["value"] = params[i]
1965
+ fileName = myModelDict[int(myType)]["File"]
1966
+ myGroup = myModelDict[int(myType)]["Group"]
1967
+ myKey = myModelDict[int(myType)]["Key"]
1968
+ if "Convertion Factor" in myModelDict[int(myType)]:
1969
+ convFact = myModelDict[int(myType)]["Convertion Factor"]
1970
+ else:
1971
+ convFact = 1.0
1972
+ tmpWolf = Wolf_Param(to_read=True, filename=os.path.join(filePath,fileName),toShow=False, init_GUI=False)
1973
+ tmpWolf.change_param(myGroup, myKey, params[i]/convFact)
1974
+ tmpWolf.SavetoFile(None)
1975
+ # tmpWolf.OnClose(None)
1976
+ tmpWolf = None
1839
1977
  else:
1840
- convFact = 1.0
1841
- tmpWolf = Wolf_Param(to_read=True, filename=filePath+fileName,toShow=False)
1842
- tmpWolf.myparams[myGroup][myKey]["value"] = params[i]/convFact
1843
- tmpWolf.SavetoFile(None)
1844
- tmpWolf.OnClose(None)
1845
- tmpWolf = None
1846
- else:
1847
-
1848
- self.curParams_vec_F[i] = params[i]
1849
- self.update_timeDelay(i+1)
1850
- refCatch.save_timeDelays([self.myParams[i+1]["junction_name"]])
1851
- print("TO DO : Complete the python parameter dict!!!!!!!")
1978
+
1979
+ self.curParams_vec_F[i] = params[i]
1980
+ self.update_timeDelay(i+1)
1981
+ refCatch.save_timeDelays([self.myParams[i+1]["junction_name"]])
1982
+ print("TO DO : Complete the python parameter dict!!!!!!!")
1852
1983
 
1853
1984
 
1854
1985
 
@@ -1884,7 +2015,7 @@ class Optimisation(wx.Frame):
1884
2015
  nb_params = int(paramDict.get_param("Paramètres à varier", "Nombre de paramètres à varier"))
1885
2016
 
1886
2017
  myModel = self.myCases[idLauncher].refCatchment.myModel
1887
- nbParamsModel = cste.modelParamsDict[myModel]["Nb"]
2018
+ nbParamsModel = cste.modelParamsDict[myModel]["Nb"]*len(cur_opti.refCatchment.myEffSubBasins)
1888
2019
 
1889
2020
  for i in range(1,nb_params+1):
1890
2021
  curParam = "param_" + str(i)
@@ -1894,41 +2025,214 @@ class Optimisation(wx.Frame):
1894
2025
  nb_params -= 1
1895
2026
 
1896
2027
  # Test
2028
+ # assert nb_params > nbParamsModel, "The number of parameters to optimize is not equal to the number of parameters of the model!"
1897
2029
  if nb_params > nbParamsModel:
1898
2030
  logging.error("The number of to optimise are greater than the number of max parameter of the model!! ")
2031
+ assert nb_params > nbParamsModel, "The number of parameters to optimize is not equal to the number of parameters of the model!"
1899
2032
  return
1900
2033
 
1901
2034
  self.myCases[idLauncher].launcherParam.change_param("Paramètres à varier", "Nombre de paramètres à varier", nb_params)
1902
2035
 
1903
2036
  return
1904
2037
 
2038
+
2039
+ def _read_opti_intervals(self, idLauncher:int=0)->list[tuple[datetime.datetime, datetime.datetime]]:
2040
+ """
2041
+ .. todo::
2042
+ - Add the measure of the comparison file in properties of the object opti
2043
+ - Check according to the current Observation, which comparision intervals are posssible -> and sort them
2044
+ - Save the comparison intervals somewhere
2045
+ - Save the useful comparison intervals somewhere
2046
+ - Return the useful intervals.
2047
+ """
2048
+ # file_compare = os.path.join(self.workingDir,"compare.txt")
2049
+ # isOk, file_compare = check_path(file_compare)
2050
+ # if isOk<0:
2051
+ # logging.error("The file compare.txt is not found!")
2052
+ # return
2053
+
2054
+ # Read the comparison file
2055
+ if self.myStations==[]:
2056
+ self.set_compare_stations(idLauncher=idLauncher)
2057
+
2058
+ nb_comparison = self.comparHowParam.get_param("Comparison global characteristics", "nb")
2059
+ str_di = "date begin"
2060
+ str_df = "date end"
2061
+
2062
+ intervals = []
2063
+ for icomp in range(1, nb_comparison+1):
2064
+ cur_key = " ".join(["Comparison", str(icomp)])
2065
+ nb_intervals = self.comparHowParam.get_param(cur_key, "nb intervals")
2066
+ for i_inter in range(1,nb_intervals+1):
2067
+ str_read = self.comparHowParam.get_param(cur_key, " ".join([str_di,str(i_inter)]))
2068
+ di = datetime.datetime.strptime(str_read, cst.DATE_FORMAT_HYDRO).replace(tzinfo=datetime.timezone.utc)
2069
+ str_read = self.comparHowParam.get_param(cur_key," ".join([str_df,str(i_inter)]))
2070
+ df = datetime.datetime.strptime(str_read, cst.DATE_FORMAT_HYDRO).replace(tzinfo=datetime.timezone.utc)
2071
+ # Check that di is a timestamp lower than other date #FIXME : to be transfer in a test function !!!!
2072
+ if di>df:
2073
+ logging.error("The date end is lower than the date begin!")
2074
+ return None
2075
+ else:
2076
+ intervals.append((di,df))
2077
+
2078
+
2079
+ return intervals
2080
+
2081
+
2082
+ def _save_opti_intervals(self, idLauncher:int=0, stationOut:str="",
2083
+ intervals:list[tuple[datetime.datetime, datetime.datetime]]=None)->int:
2084
+ if stationOut == "":
2085
+ suffix = "0"
2086
+ else:
2087
+ suffix = stationOut
2088
+
2089
+ if intervals is None:
2090
+ self.all_intervals = self._read_opti_intervals(idLauncher=idLauncher)
2091
+
2092
+ compare_file = os.path.join(self.workingDir,"compare.how.param")
2093
+
2094
+ # In case of a problem, the initial compare file is copied
2095
+ compare_file_cp = os.path.join(self.workingDir,"compare.how_"+suffix+"_tmp.param")
2096
+ isOk, compare_file_cp = check_path(compare_file_cp)
2097
+ if isOk<0 and stationOut=="":
2098
+ compare_file_cp = os.path.join(self.workingDir,"compare.how_"+suffix+".param")
2099
+ shutil.copyfile(compare_file, compare_file_cp)
2100
+ print("The following file has been copied : ", compare_file_cp)
2101
+ else:
2102
+ shutil.copyfile(compare_file, compare_file_cp)
2103
+ print("The following file has been copied : ", compare_file_cp)
2104
+
2105
+ if self.all_intervals is None:
2106
+ return -1
2107
+ else:
2108
+ return 0
1905
2109
 
1906
- def select_opti_intervals(self, idLauncher:int=0, stationOut=""):
2110
+
2111
+ def select_opti_intervals(self, all_intervals:list[tuple[datetime.datetime, datetime.datetime]]=None,
2112
+ idLauncher:int=0, stationOut="", filter_nan:bool=True)->list[tuple]:
2113
+ """
2114
+ .. todo::
2115
+ - Add the measure of the comparison file in properties of the object opti
2116
+ - Check according to the current Observation, which comparision intervals are posssible -> and sort them
2117
+ - Save the comparison intervals somewhere
2118
+ - Save the useful comparison intervals somewhere
2119
+ - Return the useful intervals.
2120
+ """
1907
2121
  cur_opti = self.myCases[idLauncher]
1908
2122
  cur_ref = cur_opti.refCatchment
2123
+
1909
2124
  if stationOut == "":
1910
2125
  stationOut = cur_ref.junctionOut
1911
- file_compare = os.path.join(self.workingDir,"compare.txt")
1912
- isOk, file_compare = check_path(file_compare)
1913
- if isOk<0:
1914
- logging.error("The file compare.txt is not found!")
1915
- return
1916
- meas_hydro = SubBasin()
2126
+
2127
+ if all_intervals is None:
2128
+ if self.all_intervals is None:
2129
+ logging.error("The intervlas are not defined! Please add them in the function arguments or use the funcion '_save_opti_intervals()' to save them internally (at your own risk!)")
2130
+ # id_ok= self._save_opti_intervals(idLauncher=idLauncher)
2131
+ # if id_ok<0:
2132
+ # return None
2133
+
2134
+ else:
2135
+ all_intervals = self.all_intervals
2136
+
2137
+ if self.myStations==[]:
2138
+ self.set_compare_stations(idLauncher=idLauncher)
2139
+
2140
+ keyBasin = cur_ref.get_key_catchmentDict(stationOut)
2141
+ cur_basin = cur_ref.catchmentDict[keyBasin]
2142
+
2143
+ # Select the optimisation intervals that are relevant according to the available measures
2144
+ effective_intv = [interv for interv in all_intervals if interv[0]>=cur_basin.dateBegin and interv[1]<=cur_basin.dateEnd]
2145
+ if filter_nan:
2146
+ effective_intv = self._define_intervals_with_nan_measures(effective_intv, self.compareSubBasins,
2147
+ idLauncher=idLauncher, stationOut=stationOut)
2148
+
2149
+ return effective_intv
2150
+
2151
+
2152
+ def _define_intervals_with_nan_measures(self, intervals: list[tuple[datetime.datetime, datetime.datetime]], measures: dict[str, SubBasin],
2153
+ idLauncher: int = 0, stationOut: str = ""):
2154
+ """
2155
+ Defines new intervals excluding all NaN measures based on the given intervals and measures dictionary.
2156
+ For instance, if there is continuous NaN measures within a given interval, the function will split
2157
+ that interval into smaller that do not contain NaN measures.
2158
+
2159
+ Args:
2160
+ intervals (list[tuple[datetime.datetime, datetime.datetime]]): A list of intervals represented as tuples of start and end datetime objects.
2161
+ measures (dict[str, SubBasin]): A dictionary of measures where the keys are station names and the values are SubBasin objects.
2162
+ idLauncher (int, optional): The id of the launcher. Defaults to 0.
2163
+ stationOut (str, optional): The station name. Defaults to "".
2164
+
2165
+ Returns:
2166
+ list[tuple[datetime.datetime, datetime.datetime]]: A list of intervals with NaN measures.
2167
+
2168
+ Raises:
2169
+ None
2170
+
2171
+ """
2172
+ if stationOut not in measures:
2173
+ logging.error("The stationOut is not in the measures dictionary!")
2174
+ return None
2175
+
2176
+ cur_el = measures[stationOut]
2177
+ hydro = cur_el.get_myHydro()
2178
+ time = cur_el.time
2179
+ # get the indices of the nan values
2180
+ non_nan_locations = ~np.isnan(hydro)
2181
+ within_intervals = np.sum(
2182
+ [(time >= datetime.datetime.timestamp(interv[0])) *
2183
+ (time <= datetime.datetime.timestamp(interv[1]))
2184
+ for interv in intervals],
2185
+ axis=0) > 0
2186
+ # Both conditions should be satisfied
2187
+ all_conditions = np.where(non_nan_locations * within_intervals)[0]
2188
+
2189
+ # Check all the discontinuities and the indices they start
2190
+ # i.e. when the index difference is not 1
2191
+ # +1 as the np.diff is one element sooner than nan_locations: diff[0]=v[1]-v[0]
2192
+ group_starts = np.where(np.diff(all_conditions) != 1)[0] + 1
2193
+
2194
+ # Add 0 as it is the first index of the first group
2195
+ group_starts = np.insert(group_starts, 0, 0)
2196
+
2197
+ # Identify where the groups stop.
2198
+ group_ends = np.append(group_starts[1:] - 1, len(all_conditions)-1)
2199
+
2200
+ # Get the timestamps of the first and last nan element and form groups of discontinuities
2201
+ iterv_timestamp = [(time[all_conditions[i_i]], time[all_conditions[i_f]]) for i_i, i_f in zip(group_starts, group_ends)]
2202
+ interv_dates = [(datetime.datetime.fromtimestamp(iterv[0],tz=datetime.timezone.utc),
2203
+ datetime.datetime.fromtimestamp(iterv[1], tz=datetime.timezone.utc))
2204
+ for iterv in iterv_timestamp]
2205
+
2206
+ return interv_dates
2207
+
2208
+
2209
+ def save_opti_dates_to_file(self, opti_dates:list[tuple[datetime.datetime,datetime.datetime]]):
2210
+ """
2211
+ Here the procedure is saving the intervals of dates for calibration in the compare.how.param
2212
+ """
2213
+ # Verifications
2214
+ assert len(opti_dates)>0, "The list of dates is empty!"
2215
+ for i_opti in opti_dates:
2216
+ assert i_opti[1]>i_opti[0], "The start date is not lower than the end date!"
1917
2217
 
1918
2218
  nb_comparison = self.comparHowParam.get_param("Comparison global characteristics", "nb")
2219
+
1919
2220
  str_di = "date begin"
1920
2221
  str_df = "date end"
1921
- for icomp in range(1, nb_comparison):
1922
- cur_key = " ".join("Comparison", str(icomp))
1923
- nb_intervals = self.comparHowParam.get_param(cur_key, "nb_intervals")
1924
- for i_inter in range(1,nb_intervals):
1925
- str_read = self.comparHowParam.get_param(" ".join(str_di,str(i_inter)))
1926
- di = datetime.datetime.timestamp(
1927
- datetime.datetime.strptime(str_read, cst.DATE_FORMAT_HYDRO).replace(tzinfo=datetime.timezone.utc))
1928
- str_read = self.comparHowParam.get_param(" ".join(str_df,str(i_inter)))
1929
- df = datetime.datetime.timestamp(
1930
- datetime.datetime.strptime(str_read, cst.DATE_FORMAT_HYDRO).replace(tzinfo=datetime.timezone.utc))
1931
- # Check that di is a timestamp lower than other date
2222
+
2223
+ for icomp in range(1, nb_comparison+1):
2224
+ cur_key = " ".join(["Comparison", str(icomp)])
2225
+ nb_intervals = len(opti_dates)
2226
+ self.comparHowParam.change_param(cur_key, "nb intervals", nb_intervals)
2227
+ for i_inter in range(1,nb_intervals+1):
2228
+ di = datetime.datetime.strftime(opti_dates[i_inter-1][0], cst.DATE_FORMAT_HYDRO)
2229
+ df = datetime.datetime.strftime(opti_dates[i_inter-1][1], cst.DATE_FORMAT_HYDRO)
2230
+ # FIXME : Change addparam to add_param
2231
+ self.comparHowParam.addparam(cur_key, " ".join([str_di,str(i_inter)]), di, type="str")
2232
+ self.comparHowParam.addparam(cur_key, " ".join([str_df,str(i_inter)]), df, type="str")
2233
+
2234
+ self.comparHowParam.SavetoFile(None)
2235
+ self.comparHowParam.Reload(None)
1932
2236
 
1933
2237
 
1934
2238
  def prepare_init_params_from_best(self, best_params:np.array, idLauncher:int=0):
@@ -1959,8 +2263,635 @@ class Optimisation(wx.Frame):
1959
2263
  init_params = None
1960
2264
 
1961
2265
  return init_params
2266
+
2267
+
2268
+ def reset_init_params(self, init_params:np.array):
2269
+ if init_params is None:
2270
+ return
2271
+ for i in range(self.nbParams):
2272
+ self.saParam.change_param("Initial parameters", " ".join(["Parameter",str(i+1)]), init_params[i])
2273
+ print("Reset init params : ", init_params)
2274
+ self.saParam.SavetoFile(None)
2275
+ self.saParam.Reload(None)
2276
+
2277
+
2278
+ def extract_internal_variables(self, event, idLauncher:int=0, to_plot:bool=True):
2279
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
2280
+
2281
+ ## Check the relevance to launch the detailed simulation to extract the internal variables
2282
+ all_x = curCatch.get_all_x_production()
2283
+ all_iv = curCatch.get_all_iv_production()
2284
+ # Graphical interface to ask the user if he wants to launch the detailed simulation
2285
+ if all_x=={} or all_iv=={}:
2286
+ to_generate = True
2287
+ if self.wx_exists:
2288
+ dlg = wx.MessageDialog(None, "No internal variables were detected! Do you want to launch a detailed simulation ?", "Warning", wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
2289
+ r = dlg.ShowModal()
2290
+ if r == wx.ID_YES:
2291
+ to_generate = True
2292
+ dlg.Destroy()
2293
+ else:
2294
+ dlg.Destroy()
2295
+ return None
2296
+ else:
2297
+ to_generate = True
2298
+ else:
2299
+ to_generate = False
2300
+ if self.wx_exists:
2301
+ dlg = wx.MessageDialog(None, "Internal variables were detected! Do you still want to launch a detailed simulation ?", "Warning", wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
2302
+ r = dlg.ShowModal()
2303
+ if r == wx.ID_YES:
2304
+ to_generate = True
2305
+ dlg.Destroy()
2306
+ # FIXME : ADD the terminal
2307
+ else:
2308
+ to_generate = False
2309
+ # Enter here if a detailed simulation is required
2310
+ if to_generate:
2311
+ curCatch.activate_all_internal_variables()
2312
+ self.generate_semiDist_optim_simul(None, idLauncher=idLauncher)
2313
+ all_x = curCatch.get_all_x_production()
2314
+ all_iv = curCatch.get_all_iv_production()
2315
+
2316
+ effective_intv = self.select_opti_intervals(idLauncher=idLauncher, stationOut="")
2317
+ all_Nash = {"Nash" : self.get_all_Nash()}
2318
+ all_frac = curCatch.get_all_fractions(summary="mean", summary_interval=effective_intv, add_info=all_Nash)
2319
+
2320
+
2321
+ if to_plot:
2322
+ for interv in effective_intv:
2323
+ all_frac = curCatch.plot_all_fractions(all_fractions=all_frac, to_show=True, range_data=list(interv))
2324
+
2325
+ return all_x, all_iv, all_frac
2326
+
2327
+
2328
+ def _check_presence_of_iv(self, idLauncher:int=0):
2329
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
2330
+ return curCatch.check_presence_of_iv()
2331
+
2332
+
2333
+ def plot_Nash_vs_Qexcess(self, event, idLauncher:int=0):
2334
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
2335
+ all_params, all_nash = self.read_all_attempts_SA(format=".dat")
2336
+ nb_tests = np.shape(all_nash)[0]
2337
+
2338
+ if self.myStations==[]:
2339
+ self.set_compare_stations(idLauncher=idLauncher)
2340
+
2341
+ compMeas = []
2342
+ for iOpti in range(len(self.myStations)):
2343
+ dateBegin = curCatch.dateBegin
2344
+ dateEnd = curCatch.dateEnd
2345
+ deltaT = curCatch.deltaT # [sec]
2346
+ stationOut = self.myStations[iOpti]
2347
+ compareFileName = self.compareFilesDict[stationOut]
2348
+ dir_Meas = self.workingDir
2349
+ compMeas.append(SubBasin(dateBegin, dateEnd, deltaT, cst.compare_opti, dir_Meas))
2350
+ _,cur_comp = compMeas[iOpti].get_hydro(1, workingDir=dir_Meas, fileNames=compareFileName)
2351
+ keyBasin = curCatch.get_key_catchmentDict(stationOut)
2352
+ cur_basin = curCatch.catchmentDict[keyBasin]
2353
+ cur_comp = cur_comp*cur_basin.surfaceDrained/3.6
2354
+ all_qtests = curCatch.get_all_Qtest(nb_atttempts=nb_tests, selection_by_iD=[stationOut])
2355
+ # FIXME : Check the type of interpolation to use
2356
+ interp_qcomp = np.interp(curCatch.time, compMeas[iOpti].time, cur_comp)
2357
+ q_diff = np.array([np.count_nonzero((qtest-interp_qcomp <0.0) & (qtest != 0.0))/np.count_nonzero((qtest != 0.0))
2358
+ for qtest in all_qtests[0]])
2359
+ fig, ax = plt.subplots()
2360
+ for i in range(nb_tests-1):
2361
+ ax.scatter(q_diff[i], all_nash[i], s=0.5, c='b', marker='o', alpha=i/nb_tests)
2362
+ ax.scatter(q_diff[-1], all_nash[-1], s=0.5, c='b', marker='o', label="test", alpha=1)
2363
+ # ax.scatter(q_diff, all_nash, s=0.5, c='b', marker='o', label="test")
2364
+ ax.set_xlabel("Non-exceedance fraction. Portion of the observations below the simulated series (Qs>Qo)")
2365
+ ax.set_ylabel("Nash-Sutcliffe efficiency")
2366
+ ax.set_ylim(1.0, -1.0)
2367
+ ax.set_xlim(0.0, 1.0)
2368
+
2369
+ i_best = np.argmax(all_nash)
2370
+
2371
+ ax.scatter(q_diff[i_best], all_nash[i_best], color='red', s=30, label="Best Nash")
2372
+ ax.set_title("2000-2011 GR4H "+stationOut)
2373
+ ax.legend()
2374
+ fig.savefig(os.path.join(curCatch.workingDir, "PostProcess/Nash_vs_Qexcess_"+stationOut+".png"))
2375
+
1962
2376
 
2377
+ plt.show()
2378
+
2379
+
2380
+ def get_all_Nash(self):
1963
2381
 
2382
+ return {cur_file: self.collect_optim(cur_file)[-1] for cur_file in self.myStations}
2383
+
2384
+ # FIXME this function is not correct -> to be corrected and delete the remove_py_params and updtate_myParams calls
2385
+ def get_all_params(self, idLauncher:int=0):
2386
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
2387
+
2388
+ hydro_model = curCatch.myModel
2389
+
2390
+ # Read the comparison file
2391
+ if self.myStations==[]:
2392
+ self.set_compare_stations(idLauncher=idLauncher)
2393
+
2394
+ calibrate_timeDelay = bool(int(self.optiParam.get_param("Semi-Distributed", "Calibrate_times")))
2395
+ myModelDict = cste.modelParamsDict[hydro_model]["Parameters"]
2396
+
2397
+ all_names = {}
2398
+ for stationOut in self.myStations:
2399
+ curCatch.define_station_out(stationOut)
2400
+ self.remove_py_params(idLauncher)
2401
+ self.update_myParams(idLauncher)
2402
+
2403
+ id_params = [self.myParams[i]["type"] for i in range(1,self.nbParams+1)]
2404
+ names = [myModelDict[cur_id]["Name"] for cur_id in id_params if cur_id>0]
2405
+ all_names[stationOut] = names
2406
+ if calibrate_timeDelay:
2407
+ # Get_nb inlets
2408
+ inletsNames = self.myCases[idLauncher].refCatchment.get_inletsName(stationOut)
2409
+ nbInlets = len(inletsNames)
2410
+ for i in range(nbInlets):
2411
+ names.append("TimeDelay "+inletsNames[i])
2412
+ # Complete the names according to the stations concerned
2413
+
2414
+ optim = {cur_file: self.collect_optim(cur_file) for cur_file in self.myStations}
2415
+
2416
+ # all_params = {}
2417
+ # for key, value in optim.items():
2418
+ # all_params[key] = {}
2419
+ # for i, cur_name in enumerate(all_names):
2420
+ # all_params[key][cur_name] = value[i]
2421
+
2422
+ all_params = {key:
2423
+ {cur_name : value[i] for i, cur_name in enumerate(all_names)}
2424
+ for key, value in optim.items()}
2425
+
2426
+ return all_params
2427
+
2428
+
2429
+ def save_all_params(self, all_params:dict={}, idLauncher:int=0):
2430
+
2431
+ all_keys = list(all_params.keys())
2432
+
2433
+ return
2434
+
2435
+
2436
+ def save_current_compare_file(self, stationOut: str):
2437
+ """
2438
+ Save the current compare file for a given station to prepare optimisation with Fortran.
2439
+
2440
+ Args:
2441
+ stationOut (str): The station identifier.
2442
+
2443
+ Returns:
2444
+ None
2445
+
2446
+ Raises:
2447
+ None
2448
+ """
2449
+ compare_file_name = self.compareFilesDict[stationOut]
2450
+ cur_sub = self.compareSubBasins[stationOut]
2451
+
2452
+ time = cur_sub.time
2453
+ hydro = cur_sub.get_myHydro()
2454
+ hydro = np.nan_to_num(hydro, nan=0.0)
2455
+
2456
+ data = np.column_stack((time, hydro))
2457
+ # Define header
2458
+ header = f"{data.shape[0]:d}\t{data.shape[1]:d}"
2459
+ # Write to file
2460
+ np.savetxt(
2461
+ os.path.join(self.workingDir, "compare.txt"),
2462
+ data,
2463
+ header=header,
2464
+ fmt=["%d", "%e"],
2465
+ comments="",
2466
+ delimiter="\t",
2467
+ )
2468
+
2469
+
2470
+ def prepare_simulation(self, opti_intervals:list[tuple[datetime.datetime, datetime.datetime]],
2471
+ idLauncher:int=0):
2472
+
2473
+ cur_catch = self.myCases[idLauncher].refCatchment
2474
+ # TODO : Create an object hydro intervals with activate property and a method to retrun a list of tuples
2475
+ simul_intevals = cur_catch.simulation_intervals
2476
+ # See which simulation intervals should be activated
2477
+ eff_simul_intervals = []
2478
+ for simul_intrv in simul_intevals:
2479
+ to_activate = False
2480
+ for cur_opti_intrv in opti_intervals:
2481
+ if cur_opti_intrv[0]>simul_intrv[0] and cur_opti_intrv[1]<simul_intrv[1]:
2482
+ to_activate = True
2483
+ break
2484
+ if to_activate:
2485
+ eff_simul_intervals.append(simul_intrv)
2486
+
2487
+ cur_catch.simulation_intervals = eff_simul_intervals
2488
+
2489
+ return
2490
+
2491
+
2492
+ def reset_simulation_intervals(self, default_interval:list[tuple[datetime.datetime, datetime.datetime]],
2493
+ idLauncher:int=0):
2494
+
2495
+ cur_catch = self.myCases[idLauncher].refCatchment
2496
+ cur_catch.simulation_intervals = default_interval
2497
+
2498
+ return
2499
+
2500
+ # FIXME : this function has been dashed off -> functionnal but not well written!!
2501
+ # TODO : to improve !!!!!!
2502
+ def test_equifinality_with_Nash(self, event, idLauncher:int=0, idOpti:int=1, quantile_Nash:float=0.6, std_Nash:float=0.02, clustering_Nash:bool=True):
2503
+ """
2504
+ Test the equifinality of the model.
2505
+
2506
+ Args:
2507
+ idLauncher (int, optional): The id of the launcher. Defaults to 0.
2508
+
2509
+ Returns:
2510
+ None
2511
+
2512
+ Raises:
2513
+ None
2514
+ """
2515
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
2516
+
2517
+ onlyOwnSub = self.optiParam.get_param("Semi-Distributed", "Own_SubBasin")
2518
+ if onlyOwnSub is None:
2519
+ onlyOwnSub = False
2520
+ doneList = []
2521
+ previousLevel = 1
2522
+ # Collect sort and save the compare stations
2523
+ self.set_compare_stations(idLauncher=idLauncher)
2524
+ sortJct = self.myStations
2525
+ # Get the initial number of intervals
2526
+ # -> these can evolve according to the measurement available at each station
2527
+ is_ok = self._save_opti_intervals()
2528
+ all_intervals = self.all_intervals
2529
+ # Activate the writing of the internal variables
2530
+ curCatch.activate_all_internal_variables()
2531
+ # Prepare the Excel writer
2532
+ writer_tot = pd.ExcelWriter(os.path.join(self.workingDir, "all_best_tests.xlsx"), engine = 'xlsxwriter')
2533
+
2534
+ for iOpti in range(len(sortJct)):
2535
+ stationOut = sortJct[iOpti]
2536
+ logging.info("==================")
2537
+ logging.info("Station : "+stationOut)
2538
+ # Build the current compare.txt file and replace all nan values by 0.0
2539
+ self.save_current_compare_file(stationOut=stationOut)
2540
+ # Save the name of the station that will be the output
2541
+ curCatch.define_station_out(stationOut)
2542
+ # Activate all the useful subs and write it in the param file
2543
+ curCatch.activate_usefulSubs(blockJunction=doneList, onlyItself=onlyOwnSub)
2544
+ # Select correct calibration intervals -> remove the intervals with NaN
2545
+ cur_intervals = self.select_opti_intervals(all_intervals=all_intervals, stationOut=stationOut, filter_nan=True)
2546
+ self.save_opti_dates_to_file(cur_intervals)
2547
+ # Rename the result file
2548
+ self.optiParam.change_param("Optimizer", "fname", stationOut)
2549
+ self.optiParam.SavetoFile(None)
2550
+ self.optiParam.Reload(None)
2551
+ self.update_myParams(idLauncher)
2552
+ # Prepare the paramPy dictionnary before calibration
2553
+ self.prepare_calibration_timeDelay(stationOut=stationOut)
2554
+ # Reload the useful modules
2555
+ self.reload_hydro(idCompar=0, fromStation=stationOut, lastLevel=previousLevel, updateAll=True)
2556
+ ## =======
2557
+ ## Init
2558
+ ## =======
2559
+ self.init_optimizer(idOpti)
2560
+ self.associate_ptr(None, idOpti=idOpti)
2561
+ # Get the best parameters to test
2562
+ all_params = self.get_best_params(stationOut=stationOut, quantile=quantile_Nash, std=std_Nash, apply_clustering=clustering_Nash)
2563
+ ## =======
2564
+ ## Compute
2565
+ ## =======
2566
+ all_frac = []
2567
+
2568
+ for i in range(len(all_params)):
2569
+ cur_p = all_params[i, :-1]
2570
+ cur_obj = all_params[i, -1]
2571
+ cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
2572
+ print("cur_obj : ", cur_obj, " ; cur_obj2 : ", cur_obj2)
2573
+ if cur_obj != cur_obj2:
2574
+ logging.error("The objective function is not the same as the one computed by the model!")
2575
+ logging.error("cur_obj : "+str(cur_obj)+" ; cur_obj2 : "+str(cur_obj2))
2576
+ # assert cur_obj == cur_obj2, "The objective function is not the same as the one computed by the model!"
2577
+ self.write_mesh_results_optimizer(idOpti=idOpti)
2578
+ # Save all the variables/evaluations desired
2579
+ frac_dict = self._get_cur_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
2580
+ cur_all_frac = list(frac_dict.values())
2581
+ p_excess = self._get_exceedance(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
2582
+ max_sim_obs = self._get_ratio_max_sim_obs(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
2583
+ # Extract the time delays
2584
+ all_timeDelays = curCatch.get_timeDelays_inlets(ref=stationOut)
2585
+ all_timeDelays_str = {key : str(datetime.timedelta(seconds=all_timeDelays[key])) for key in all_timeDelays}
2586
+ cur_timeDelays = list(all_timeDelays_str.values())
2587
+ # Concatenate all the informations
2588
+ cur_all_frac = list(cur_p) + cur_timeDelays + cur_all_frac + [p_excess, max_sim_obs, cur_obj]
2589
+ all_frac.append(cur_all_frac)
2590
+
2591
+ # Get param names
2592
+ names = self.get_param_names(idLauncher=idLauncher, stationOut=stationOut)
2593
+ # Save the evaluations
2594
+ var_names = names + list(all_timeDelays_str.keys()) + list(frac_dict.keys()) + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
2595
+ cur_df = pd.DataFrame(all_frac, columns=var_names)
2596
+ # write first the tempory results for each station
2597
+ writer_stat = pd.ExcelWriter(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
2598
+ cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
2599
+ writer_stat.sheets[stationOut].autofit()
2600
+ writer_stat.close()
2601
+ # write now the informations for all the stations in the same excel file
2602
+ cur_df.to_excel(writer_tot, sheet_name=stationOut, columns=var_names)
2603
+ writer_tot.sheets[stationOut].autofit()
2604
+
2605
+ ## =======
2606
+ ## =======
2607
+ # Collect the best parameters and their objective function(s)
2608
+ best_params = self.apply_optim(None)
2609
+ # Simulation with the best parameters
2610
+ self.compute_distributed_hydro_model()
2611
+ # Update myHydro of all effective subbasins to get the best configuration upstream
2612
+ curCatch.read_hydro_eff_subBasin()
2613
+ # Update timeDelays according to time wolf_array
2614
+ self.apply_timeDelay_dist(idOpti=idOpti, idLauncher=idLauncher, junctionKey=stationOut)
2615
+ # Update the outflows
2616
+ curCatch.update_hydro(idCompar=0)
2617
+
2618
+ # All upstream elements of a reference will be fixed
2619
+ doneList.append(stationOut)
2620
+ previousLevel = curCatch.levelOut
2621
+
2622
+ writer_tot.close()
2623
+ logging.info("The equifinality test is finished!")
2624
+
2625
+
2626
+ def get_best_params(self, stationOut:str,
2627
+ criterion:str="Nash", quantile:float=0.99, std:float=0.05, eps:float=0.1,
2628
+ objective_fct:bool= True, apply_clustering:bool=False):
2629
+ from sklearn.cluster import DBSCAN
2630
+ """
2631
+ Get the best parameters for a given station.
2632
+
2633
+ Args:
2634
+ stationOut (str): The station identifier.
2635
+ idLauncher (int, optional): The id of the launcher. Defaults to 0.
2636
+
2637
+ Returns:
2638
+ np.array: The best parameters.
2639
+
2640
+ Raises:
2641
+ None
2642
+ """
2643
+
2644
+ best_objfct = self.collect_optim()[-1]
2645
+ all_params, all_obj_fct = self.read_all_attempts_SA(format=".dat", all_attempts=True)
2646
+
2647
+ quantile_cond = (all_obj_fct > np.quantile(all_obj_fct, quantile))
2648
+ std_cond = (all_obj_fct > best_objfct*(1-std))
2649
+ all_cond = np.where(np.logical_and(quantile_cond, std_cond))[0]
2650
+ eff_params = all_params[all_cond]
2651
+ eff_obj = all_obj_fct[all_cond]
2652
+
2653
+ if objective_fct:
2654
+ eff_params = np.column_stack((eff_params, eff_obj))
2655
+
2656
+ # In this part we filter abd remove the parameters that are almost equivalent
2657
+ # To do so, we use the DBSCAN clustering algorithm to group the parameters that are close to each other
2658
+ # and only keep the set of parameter that has the best Nash-Sutcliffe efficiency per group
2659
+ # The parameters that are not grouped are considered had "particular" and are still kept in the final set
2660
+ if apply_clustering:
2661
+ # "Normalise" or scale btw [0;1] the parameter vector to make the clustering more efficient
2662
+ min_param = np.min(eff_params, axis=0)
2663
+ max_param = np.max(eff_params, axis=0)
2664
+ norm_params = (eff_params-min_param)/(max_param-min_param)
2665
+ db = DBSCAN(eps=eps).fit(norm_params)
2666
+ labels = db.labels_
2667
+ # Extraction of the number of groups and particular cases
2668
+ n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
2669
+ n_noise = list(labels).count(-1)
2670
+ noise_ind = np.where(labels==-1)[0]
2671
+
2672
+ # First extract all the vectors that are grouped and their indices
2673
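+ # Note: core_sample_indices_ only lists the core points of each cluster; border points assigned to a cluster are not included here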
+ grouped_ind = db.core_sample_indices_
2674
+ grouped_params = eff_params[grouped_ind]
2675
+ grouped_labels = labels[grouped_ind]
2676
+
2677
+ # Init of the filtered parameters vector
2678
+ filtered_params = np.zeros((n_clusters+n_noise, np.shape(eff_params)[1]))
2679
+ # Loop to determine the best set of parameter per group
2680
+ best_indices_per_group = np.zeros(n_clusters, dtype=int)
2681
+ for i in range(n_clusters):
2682
+ cur_indices = np.where(grouped_labels==i)[0]
2683
+ cur_group = grouped_params[cur_indices]
2684
+ best_indices_per_group[i] = cur_indices[np.argmax(cur_group[:,-1])]
2685
+
2686
+ # Keep the best set of parameters per group
2687
+ filtered_params[:n_clusters] = grouped_params[best_indices_per_group]
2688
+ # Keep all the element that could not be grouped
2689
+ filtered_params[n_clusters:] = eff_params[noise_ind]
2690
+
2691
+ return filtered_params
2692
+
2693
+ return eff_params
2694
+
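+ # Usage sketch (illustrative only, the station name below is hypothetical):
+ #   best_sets = self.get_best_params(stationOut="Station_1", quantile=0.95,
+ #                                     std=0.05, apply_clustering=True)
+ # Each row holds one retained parameter set, with the objective value appended
+ # as the last column when objective_fct is True.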
2695
+
2696
+ # FIXME : interp function used -> add the method of interpolation as an argument
2697
+ def _get_exceedance(self, idLauncher:int=0, stationOut:str="",
2698
+ intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]) -> float:
2699
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
2700
+ cur_key = curCatch.get_key_catchmentDict(stationOut)
2701
+ curBasin: SubBasin = curCatch.catchmentDict[cur_key]
2702
+
2703
+ simul = curBasin.outFlow
2704
+ measure = self.compareSubBasins[stationOut]
2705
+ compare = np.interp(curCatch.time, measure.time, measure.outFlow)
2706
+
2707
+ if intervals != []:
2708
+ interv = np.zeros(len(curCatch.time), dtype=bool)
2709
+ for el in intervals:
2710
+ date_i = datetime.datetime.timestamp(el[0])
2711
+ date_f = datetime.datetime.timestamp(el[1])
2712
+ interv += (curCatch.time>=date_i) & (curCatch.time<=date_f)
2713
+ else:
2714
+ interv = np.ones(len(curCatch.time), dtype=bool)
2715
+
2716
+ eff_simul = simul[interv]
2717
+ eff_compare = compare[interv]
2718
+
2719
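+ # Fraction of time steps with non-zero simulated flow where the simulation stays below the measurement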
+ q_diff = np.count_nonzero((eff_simul-eff_compare <0.0) & (eff_simul != 0.0))/np.count_nonzero((eff_simul != 0.0))
2720
+
2721
+ return q_diff
2722
+
2723
+
2724
+ # FIXME : to improve and generalise
2725
+ def _get_cur_fractions(self, idLauncher:int=0, stationOut:str="",
2726
+ intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]) -> dict[list[str], list[float]]:
2727
+ """
2728
+ Get the mean fractions of the sub-basin associated with the given station.
2729
+
2730
+ Args:
2731
+ idLauncher (int, optional): The id of the launcher. Defaults to 0.
2732
+ stationOut (str, optional): The station identifier. Defaults to "".
2733
+ intervals (list[tuple[datetime.datetime, datetime.datetime]], optional): Time intervals to consider. Defaults to [].
2734
+
2735
+ Returns:
2736
+ dict: The summary (mean) fractions of the sub-basin.
2737
+
2738
+ Raises:
2739
+ None
2740
+ """
2741
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
2742
+ cur_key = curCatch.get_key_catchmentDict(stationOut)
2743
+ curBasin: SubBasin = curCatch.catchmentDict[cur_key]
2744
+ cur_fracts = curBasin.get_summary_fractions(summary="mean", interval=intervals)
2745
+
2746
+ return cur_fracts
2747
+
2748
+
2749
+ def _get_ratio_max_sim_obs(self, idLauncher:int=0, stationOut:str="",
2750
+ intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]) -> float:
2751
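+ # Ratio between the maximum simulated and maximum measured discharges over the selected intervals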
+
2752
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
2753
+ cur_key = curCatch.get_key_catchmentDict(stationOut)
2754
+ curBasin: SubBasin = curCatch.catchmentDict[cur_key]
2755
+ measure = self.compareSubBasins[stationOut]
2756
+
2757
+ if intervals != []:
2758
+ interv_simul = np.zeros(len(curCatch.time), dtype=bool)
2759
+ interv_meas = np.zeros(len(measure.time), dtype=bool)
2760
+ for el in intervals:
2761
+ date_i = datetime.datetime.timestamp(el[0])
2762
+ date_f = datetime.datetime.timestamp(el[1])
2763
+ interv_simul += (curCatch.time>=date_i) & (curCatch.time<=date_f)
2764
+ interv_meas += (measure.time>=date_i) & (measure.time<=date_f)
2765
+ else:
2766
+ interv_simul = np.ones(len(curCatch.time), dtype=bool)
2767
+ interv_meas = np.ones(len(measure.time), dtype=bool)
2768
+
2769
+ simul = curBasin.outFlow[interv_simul]
2770
+ compare = measure.outFlow[interv_meas]
2771
+ ratio = np.nanmax(simul)/np.nanmax(compare)
2772
+
2773
+ return ratio
2774
+
2775
+
2776
+ # Here, we consider that the parameters are already sorted, i.e. model parameters first and Python parameters (type < 0) after
2777
+ def get_param_names(self, idLauncher:int=0, stationOut:str=""):
2778
+ curCatch:Catchment = self.myCases[idLauncher].refCatchment
2779
+ myModelDict = cste.modelParamsDict[curCatch.myModel]["Parameters"]
2780
+ id_params = [self.myParams[i]["type"] for i in range(1,self.nbParams+1)]
2781
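+ # Only model parameters (type > 0) get a name from the model dictionary; Python-side parameters (type < 0) are skipped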
+ names = [myModelDict[cur_id]["Name"] for cur_id in id_params if cur_id>0]
2782
+
2783
+ calibrate_timeDelay = bool(int(self.optiParam.get_param("Semi-Distributed", "Calibrate_times")))
2784
+ if calibrate_timeDelay:
2785
+ # Get the number of inlets
2786
+ inletsNames = self.myCases[idLauncher].refCatchment.get_inletsName(stationOut)
2787
+ nbInlets = len(inletsNames)
2788
+ for i in range(nbInlets):
2789
+ names.append("TimeDelay "+inletsNames[i])
2790
+
2791
+
2792
+ return names
2793
+
2794
+ # Plot the equifinality test for each station
2795
+ def plot_equifinality(self, event, idLauncher:int=0):
2796
+
2797
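+ # For each station, read "<stationOut>_tests.xlsx" and plot Nash against the runoff fractions,
+ # the probability of exceedance and the simulated/measured peak ratio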
+ physical_properties = ["%qof", "%qif", "%qbf", "%loss"]
2798
+ colors_properties = ["b", "g", "k", "orange"]
2799
+ y_label = "Nash"
2800
+
2801
+ if self.myStations==[]:
2802
+ self.set_compare_stations(idLauncher=idLauncher)
2803
+ sortJct = self.myStations
2804
+
2805
+ for iOpti in range(len(sortJct)):
2806
+ stationOut = sortJct[iOpti]
2807
+ filename = os.path.join(self.workingDir, stationOut+"_tests.xlsx")
2808
+ if os.path.isfile(filename):
2809
+ df = pd.read_excel(filename, sheet_name=stationOut)
2810
+ # Plot the physical properties
2811
+ fig, ax = plt.subplots()
2812
+ for cur_prop, cur_color in zip(physical_properties, colors_properties):
2813
+ cur_columns = [col for col in df.columns if cur_prop in col.replace(" ", "")]
2814
+ if cur_columns != []:
2815
+ corr_prop = cur_columns[0]
2816
+ ax.scatter(df.loc[:,corr_prop], df.loc[:,y_label], s=0.5, c=cur_color,
2817
+ marker='o', label=cur_prop, alpha=0.4)
2818
+ ax.set_xlabel("% of the rain [-]")
2819
+ ax.set_ylabel(y_label+" [-]")
2820
+ ax.set_title("Proportion of rain : "+stationOut)
2821
+ ax.legend()
2822
+ fig.savefig(os.path.join(self.workingDir, "Equifinality_physical_prop_"+stationOut+".png"))
2823
+ # Plot the Probability of exceedance
2824
+ cur_color = colors_properties[0]
2825
+ x_label = "P. of exceedance"
2826
+ fig, ax = plt.subplots()
2827
+ if x_label in df.columns:
2828
+ ax.scatter(df.loc[:,x_label], df.loc[:,y_label], s=0.5, c=cur_color, marker='o', label=x_label)
2829
+ ax.set_xlabel(x_label +" [-]")
2830
+ ax.set_ylabel(y_label+" [-]")
2831
+ ax.set_title("Probability of Q_sim > Q_meas : "+stationOut)
2832
+ ax.legend()
2833
+ fig.savefig(os.path.join(self.workingDir, "Equifinality_prob_excess_"+stationOut+".png"))
2834
+ # Plot Q_sim/Q_max
2835
+ x_label = "Qmax_simul/Q_max_measure"
2836
+ fig, ax = plt.subplots()
2837
+ if x_label in df.columns:
2838
+ ax.scatter(df.loc[:,x_label], df.loc[:,y_label], s=0.5, c=cur_color, marker='o', label=x_label)
2839
+ ax.set_xlabel(x_label +" [-]")
2840
+ ax.set_ylabel(y_label+" [-]")
2841
+ ax.set_title("Peak analysis : "+stationOut)
2842
+ ax.legend()
2843
+ fig.savefig(os.path.join(self.workingDir, "Equifinality_peaks_ratio_"+stationOut+".png"))
2844
+
2845
+ else:
2846
+ logging.error("The file "+filename+" does not exist!")
2847
+
2848
+ plt.show()
2849
+
2850
+
2851
+ def add_Case(self, idLauncher:int=0):
2852
+
2853
+ i = idLauncher
2854
+ newCase = CaseOpti()
2855
+ launcherDir = self.optiParam.get_param("Cases","dir_"+str(i+1))
2856
+ isOk, launcherDir = check_path(launcherDir, prefix=self.workingDir, applyCWD=True)
2857
+ if isOk<0:
2858
+ print("ERROR : in path of launcherDir")
2859
+ newCase.read_param(launcherDir, copyDefault=False, callback=self.update_parameters_launcher)
2860
+ # FIXME TO CHANGE when the separation from the GUI is done
2861
+ if self.wx_exists:
2862
+ newId = wx.Window.NewControlId()
2863
+ iMenu = self.MenuBar.FindMenu('Param files')
2864
+ paramMenu = self.MenuBar.Menus[iMenu][0]
2865
+ curName = 'Case '+str(i+1)
2866
+ iItem = self.MenuBar.FindMenuItem('Param files', curName)
2867
+ if(iItem==wx.NOT_FOUND):
2868
+ caseMenu = wx.Menu()
2869
+ paramCaseFile = caseMenu.Append(wx.ID_ANY, 'launcher.param')
2870
+ self.Bind(wx.EVT_MENU, newCase.show_launcherParam, paramCaseFile)
2871
+ guiHydroCase = caseMenu.Append(wx.ID_ANY, 'GUI Hydro')
2872
+ refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
2873
+ isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
2874
+ if isOk<0:
2875
+ print("ERROR : in path of launcherDir")
2876
+ newCase.mydro = HydrologyModel(dir=refDir)
2877
+ newCase.mydro.Hide()
2878
+ self.Bind(wx.EVT_MENU, newCase.show_mydro, guiHydroCase)
2879
+ curCase = paramMenu.Append(newId, curName, caseMenu)
2880
+ else:
2881
+ print("WARNING : this scenario was not implemented yet. This might induce an error!")
2882
+ # iItem =
2883
+ curCase = paramMenu.Replace(iItem)
2884
+ else:
2885
+ refDir = newCase.launcherParam.get_param("Calculs","Répertoire simulation de référence")
2886
+ isOk, refDir = check_path(refDir, prefix=launcherDir, applyCWD=True)
2887
+ newCase.mydro = HydrologyModel(dir=refDir)
2888
+
2889
+ self.Bind(wx.EVT_MENU, newCase.show_launcherParam, curCase)
2890
+ newCase.idMenuItem = newId
2891
+ self.myCases.append(newCase)
2892
+
2893
+
2894
+
1964
2895
  def make_nd_array(self, c_pointer, shape, dtype=np.float64, order='C', own_data=True,readonly=False):
1965
2896
  arr_size = np.prod(shape[:]) * np.dtype(dtype).itemsize
1966
2897
 
@@ -1977,3 +2908,4 @@ class Optimisation(wx.Frame):
1977
2908
  return arr.copy()
1978
2909
  else:
1979
2910
  return arr
2911
+