wolfhece 2.1.13__py3-none-any.whl → 2.1.14__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registries.
wolfhece/PyDraw.py CHANGED
@@ -191,7 +191,7 @@ class DragdropFileTarget(wx.FileDropTarget):
                 id = id + '_1'

             try:
-                newobj = Zones(filename=name)
+                newobj = Zones(filename=name, parent=self.window)
                 self.window.add_object('vector', newobj = newobj, id = id)
             except:
                 logging.error(_('Error while loading vector : ') + name)
@@ -203,7 +203,7 @@ class DragdropFileTarget(wx.FileDropTarget):
                 id = id + '_1'

             try:
-                newobj = cloud_vertices(fname=name)
+                newobj = cloud_vertices(fname=name, mapviewer=self.window)
                 self.window.add_object('cloud', newobj = newobj, id = id)
             except:
                 logging.error(_('Error while loading cloud : ') + name)
@@ -380,6 +380,12 @@ class WolfMapViewer(wx.Frame):

         self.filemenu.AppendSeparator()

+        # SIMULATION Hydrologique
+
+        hydrol = self.filemenu.Append(wx.ID_ANY, _('Open hydrological model'), _('Hydrological simulation'))
+
+        self.filemenu.AppendSeparator()
+
         # MULTIVIEWER

         compareitem = self.filemenu.Append(wx.ID_ANY, _('Set comparison'), _('Set comparison'))
@@ -665,6 +671,13 @@ class WolfMapViewer(wx.Frame):
         self._wxlogging = value


+    def open_hydrological_model(self):
+        """ Open a hydrological model """
+
+        from .PyGui import HydrologyModel
+
+        newview = HydrologyModel(splash = False)
+
     def create_2D_MB_model(self):
         """ Create a 2D model """

@@ -4731,6 +4744,10 @@ class WolfMapViewer(wx.Frame):

             self.create_2D_MB_model()

+        elif itemlabel == _('Open hydrological model'):
+
+            self.open_hydrological_model()
+
         elif itemlabel == _('Check headers'):

             self.check_2D_MB_headers()
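
Note: the three PyDraw.py hunks above work together: a new File-menu entry, the open_hydrological_model() handler, and its dispatch on the menu label. A minimal usage sketch, assuming an already-running WolfMapViewer instance named mapviewer (hypothetical variable name):

    # Hypothetical sketch -- requires a running wx.App; `mapviewer` is an existing WolfMapViewer.
    # Selecting 'Open hydrological model' in the File menu, or calling the handler directly,
    # imports HydrologyModel from .PyGui and opens it without a splash screen.
    mapviewer.open_hydrological_model()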
wolfhece/PyGui.py CHANGED
@@ -276,19 +276,32 @@ class HydrologyModel(GenMapManager):
                                                       ('_encode.sub','Coded index SubB [-]')]}


-        self.files_hydrology_vectors={'Characteristic_maps':[('.delimit.vec','Watershed')],
+        self.files_hydrology_vectors={'Characteristic_vectors':[('.delimit.vec','Watershed')],
                                       'Whole_basin':[('Rain_basin_geom.vec','Rain geom'),
                                                      ('Evap_basin_geom.vec','Evapotranspiration geom')]}

         for curfile in self.files_hydrology_array['Characteristic_maps']:
             curext=curfile[0]
-            curidx=curfile[1]
+            curidx=curfile[1]
             self.mapviewer.add_object(which='array',filename=self.mydircharact+curext,id=curidx,ToCheck=False)

-        for curfile in self.files_hydrology_vectors['Characteristic_maps']:
+
+        for curfile in self.files_hydrology_vectors['Characteristic_vectors']:
             curext=curfile[0]
             curidx=curfile[1]
-            self.mapviewer.add_object(which='vector',filename=self.mydircharact+curext,id=curidx,ToCheck=False)
+
+            delimit = Zones(filename=self.mydircharact+curext, mapviewer=self.mapviewer, parent = self.mapviewer)
+
+            for idx, cur_zone in enumerate(delimit.myzones):
+                cur_sub = self.mycatchment.get_subBasin(idx+1)
+                cur_zone.myname = cur_sub.name
+                cur_vect = cur_zone.myvectors[0]
+                cur_vect.set_legend_to_centroid(cur_sub.name + ' - ' + str(cur_sub.iDSorted), visible=True)
+                cur_vect.myprop.legendfontsize = 12
+
+            delimit.reset_listogl()
+
+            self.mapviewer.add_object(which='vector',newobj = delimit, id=curidx, ToCheck=True)

         for curfile in self.files_hydrology_vectors['Whole_basin']:
             curext=curfile[0]
@@ -297,7 +310,17 @@
             self.mapviewer.add_object(which='vector',filename=self.mydirwhole+curext,id=curidx,ToCheck=False)

         self.mapviewer.add_object(which='vector',newobj=self.myexchanges.mysegs,id='Forced exchanges',ToCheck=False)
+
+        zones_RT = self.mycatchment.get_retentionbasin_zones()
+        zones_RT.parent = self
+        self.mapviewer.add_object(which='vector',newobj=zones_RT,id='Anthropic links',ToCheck=False)
+
         self.mapviewer.add_object(which='cloud',newobj=self.mycatchment.subBasinCloud,id='Local outlets',ToCheck=False)
+        self.mapviewer.add_object(which='cloud',newobj=self.mycatchment.retentionBasinCloud,id='Anthropic inlets/outlets',ToCheck=False)
+
+        self.mycatchment.subBasinCloud.set_mapviewer(self.mapviewer)
+        self.mycatchment.retentionBasinCloud.set_mapviewer(self.mapviewer)
+
         self.mapviewer.add_object(which='cloud',newobj=self.myexchanges.mycloudup,id='Up nodes',ToCheck=False)
         self.mapviewer.add_object(which='cloud',newobj=self.myexchanges.myclouddown,id='Down nodes',ToCheck=False)

wolfhece/PyParams.py CHANGED
@@ -1064,7 +1064,7 @@ class Wolf_Param(wx.Frame):
                 logging.debug("String type will be conserved! -- {}".format(value_param))

             if type(value_param) != int:
-                logging.warning("Parameters -- EnumProperty -- Value {} is not an integer".format(value_param))
+                logging.warning("Parameters -- EnumProperty -- Value {} is not an integer in file : {}".format(value_param, self.filename))
                 logging.debug("EnumProperty value must be an integer")

             page.Append(pg.EnumProperty(label= param_name, name= locname, labels= list_keys, values= list_values, value= int(value_param)))
@@ -2523,7 +2523,12 @@ class zone:

        :param prep: True = préparation des listes OpenGL ; False = affichage direct
        """
+
        if prep:
+            if len(self.myvectors) == 0:
+                logging.warning(_('No vector in zone -- {}').format(self.myname))
+                return
+
            try:
                if self.idgllist==-99999:
                    self.idgllist = glGenLists(1)
@@ -2540,6 +2545,10 @@
            except:
                logging.error(_('OpenGL error in zone.plot'))
        else:
+            if len(self.myvectors) == 0:
+                logging.warning(_('No vector in zone -- {}').format(self.myname))
+                return
+
            if self.idgllist!=-99999:
                glCallList(self.idgllist)
            else:
@@ -4811,6 +4820,10 @@ class Zones(wx.Frame, Element_To_Draw):
            return expended_items

        def restore_tree_state(tree:TreeListCtrl, expended_items):
+
+            if len(expanded)==0:
+                # Nothing to do
+                return

            root = tree.GetRootItem()

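Note: the guard added to restore_tree_state tests `expanded`, while the helper's parameter is named `expended_items`; unless `expanded` exists in an enclosing scope, the early return would raise a NameError instead of short-circuiting. A minimal sketch of the presumed intent (an assumption, not the code shipped in this release):

    def restore_tree_state(tree, expended_items):
        # Presumed intent: skip the restore when nothing was expanded
        if len(expended_items) == 0:
            return
        root = tree.GetRootItem()
        # ... original restore logic unchanged ...
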
@@ -5837,6 +5850,7 @@ class Zones(wx.Frame, Element_To_Draw):

        Pousse la même information dans l'objet parent s'il existe
        """
+
        if self.wx_exists:
            self.active_zone = object

wolfhece/apps/version.py CHANGED
@@ -5,7 +5,7 @@ class WolfVersion():

        self.major = 2
        self.minor = 1
-        self.patch = 13
+        self.patch = 14

    def __str__(self):

wolfhece/fonts/arial.ttf CHANGED
Binary file
@@ -53,10 +53,9 @@ class Catchment:
    _version:float # version of the wolfHydro python code. Useful for identifying the file versions to read and how to interpret them
    charact_watrshd:Watershed # Watershed object containing the most useful properties of the arrays in Characteristics maps
    subBasinCloud:cloud_vertices # cloud of points containing the true coordinates (used in simulation) of all subbasin outlets
+    retentionBasinCloud:cloud_vertices # cloud of points containing the true coordinates (used in simulation) of all retention basins
    iP_Cloud:cloud_vertices # cloud of points containing the given coordinates (given in param files) of all subbasin outlets

-    subBasinDict:dict[int:SubBasin]
-
    catchmentDict:dict[Union[str, int], Union[SubBasin, RetentionBasin]] # dictionnary containing all the elements of the catchment
    subBasinDict:dict[int, SubBasin] # dictionnary containing all the subbasins

@@ -85,6 +84,10 @@ class Catchment:
        self.subBasinCloud.myprop.color=getIfromRGB((255,131,250))
        self.subBasinCloud.myprop.filled=True

+        self.retentionBasinCloud=cloud_vertices()
+        self.retentionBasinCloud.myprop.color=getIfromRGB((0,131,255))
+        self.retentionBasinCloud.myprop.filled=True
+
        self.iP_Cloud=cloud_vertices()
        self.iP_Cloud.myprop.color=getIfromRGB((255,131,250))
        self.iP_Cloud.myprop.filled=True
@@ -221,6 +224,8 @@ class Catchment:
        # Iterate through the Input params dictionnary
        self.create_ObjectsInCatchment()

+        self.charact_watrshd.set_names_subbasins([(cur.iDSorted, cur.name) for cur in self.subBasinDict.values()])
+
        # self.add_hyetoToDict()

@@ -323,8 +328,27 @@ class Catchment:
        if(self.plotAllSub):
            self.plot_allSub()

+        # self.charact_watrshd.impose_sorted_index_subbasins([cur.iDSorted for cur in self.subBasinDict.values()])

+        self._fill_cloud_retentionbasin()
+
+    def get_subBasin(self, id_sorted_or_name:int | str) -> SubBasin:
+        """
+        This method returns the subbasin object associated with the sorted id or name given in argument.
+
+        The sorted id is the one given by the Fortran code.
+        """

+        if isinstance(id_sorted_or_name, str):
+            for cursub in self.subBasinDict.values():
+                if(cursub.name.lower() == id_sorted_or_name.lower()):
+                    return cursub
+
+        elif isinstance(id_sorted_or_name, int):
+            for cursub in self.subBasinDict.values():
+                if(cursub.iDSorted == id_sorted_or_name):
+                    return cursub
+        return None



    def get_time(self):
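
Note: get_subBasin() resolves a sub-basin either by its Fortran-sorted id or by its name (case-insensitive) and returns None when nothing matches; PyGui.HydrologyModel uses it to label the watershed delimitations. A usage sketch, assuming an already-built Catchment instance named catchment (hypothetical variable name):

    # Hypothetical usage -- `catchment` is an existing, fully initialised Catchment
    sub = catchment.get_subBasin(1)              # by sorted id, as done in PyGui.HydrologyModel
    same = catchment.get_subBasin(sub.name)      # case-insensitive lookup by name
    if catchment.get_subBasin(9999) is None:     # unknown id -> None
        print('no such sub-basin')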
@@ -565,6 +589,7 @@ class Catchment:
                    self.junctionNamesDict[self.paramsTopology.myparams[element]["outlet "+str(iOutlet+1)][key_Param.VALUE]] = element

                self.retentionBasinDict[element] = RetentionBasin(self.dateBegin, self.dateEnd, self.deltaT, self.time, idBasin, nameBasin, typeOfRB, self.paramsRB.myparams, _tz=self.tz, _outletNames=myOutletsNames, _workingDir=self.workingDir)
+
                # Save the RB in the RB dictionnary into the global Catchment dictionnary
                self.catchmentDict[element] = self.retentionBasinDict[element]

@@ -573,6 +598,25 @@ class Catchment:
                print("ERROR: This type of junction is unknown. Please check the topo postprocess file")
                sys.exit()

+    def _fill_cloud_retentionbasin(self):
+        """ This procedure fills the cloud of the retention basin with the vertices of the retention basin and its inlets. """
+
+        for curRT in self.retentionBasinDict.values():
+            curRT:RetentionBasin
+            self.retentionBasinCloud.add_vertex(wolfvertex(curRT.x,curRT.y))
+            inlet_coords = curRT.get_inletCoords()
+            for cur_inlet in inlet_coords:
+                self.retentionBasinCloud.add_vertex(wolfvertex(cur_inlet[0],cur_inlet[1]))
+
+    def get_retentionbasin_zones(self)-> Zones:
+        """ This method returns a Zones instance of the retention basins. """
+
+        zones = Zones()
+        for curRB in self.retentionBasinDict.values():
+            curRB:RetentionBasin
+            zones.add_zone(curRB.get_zone(), forceparent=True)
+
+        return zones


    def link_objects(self):
@@ -1367,7 +1411,7 @@ class Catchment:
                excelData[0].append(self.topologyDict[level][curBasin].x)
                excelData[1].append(self.topologyDict[level][curBasin].y)
                for curChar in self.topologyDict[level][curBasin].mainCharactDict:
-                    excelData[iChar].append(self.topologyDict[level][curBasin].mainCharactDict[curChar][key_Param.VALUE])
+                    excelData[iChar].append(self.topologyDict[level][curBasin].mainCharactDict[curChar]["value"])
                    iChar += 1
                iBasin += 1

@@ -1803,6 +1847,46 @@
            # paramsInput.ApplytoMemory(None)
            paramsInput.SavetoFile(None)

+
+    def _correct_Umax_from_old_model(self, adapt_with_rain:bool=True):
+        fileName = "simul_soil.param"
+        which="Umax"
+
+        for iBasin in range(1,len(self.subBasinDict)+1):
+            myBasin = self.subBasinDict[iBasin]
+            dirID = myBasin.iDSorted
+
+            fileToModif = os.path.join(self.workingDir, "Subbasin_" + str(dirID), fileName)
+
+            paramsInput = Wolf_Param(to_read=False,toShow=False)
+            paramsInput.ReadFile(fileToModif)
+
+            if adapt_with_rain:
+                myInterval = paramsInput.get_param("Distributed production model parameters", "Time span soil", default_value=0.0)
+                if myInterval==0.0:
+                    nbIntervals = len(myBasin.myRain)-1
+                else:
+                    nbIntervals = math.floor(myInterval/myBasin.deltaT)
+                kernel = np.ones(nbIntervals)
+                volRain = np.convolve(myBasin.myRain, kernel)*myBasin.deltaT/3600.0
+                maxRain = np.max(volRain)
+            else:
+                maxRain = paramsInput.get_param("Distributed production model parameters", "Umax")
+                if maxRain==0.0:
+                    logging.warning("The Umax is not adapted with the rain and its value is 0.0. It might be better to put 'adapt_with_rain' to True.")
+
+            k = paramsInput.get_param("Horton parameters", "k", default_value=0.0)
+            if k==0.0:
+                continue
+
+            U_max = maxRain/k
+
+            paramsInput.change_param("Distributed production model parameters", which, U_max)
+            paramsInput.change_param("Horton parameters", "k", 0.0)
+
+            # paramsInput.ApplytoMemory(None)
+            paramsInput.SavetoFile(None)
+


    def plot_all_diff_cumulRain_with_lagtime(self, interval, lagTime, selection_by_iD=[], graph_title="", show=True, writeDir="", lawNetRain=0, netRainParams={}):
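
Note on _correct_Umax_from_old_model(): for each sub-basin it reads Subbasin_<iDSorted>/simul_soil.param, computes the largest cumulated rainfall over the 'Time span soil' window (or over the whole series when that span is 0), sets Umax = maxRain / k and resets the Horton k to 0.0. For instance, a maximum cumulated rain of 45 mm with k = 0.3 yields Umax = 150. A minimal sketch of the convolution step with made-up numbers (illustration only, not data from the package):

    import numpy as np

    rain = np.array([2.0, 10.0, 6.0, 0.0, 4.0])      # hypothetical hyetograph [mm/h]
    deltaT = 3600.0                                   # time step [s]
    nbIntervals = 3                                   # 'Time span soil' / deltaT
    volRain = np.convolve(rain, np.ones(nbIntervals)) * deltaT / 3600.0
    maxRain = np.max(volRain)                         # 18.0 mm over the wettest 3-step window
    k = 0.3                                           # Horton parameter read from the file
    U_max = maxRain / k                               # 60.0, written back as "Umax"; k is then set to 0.0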
@@ -581,7 +581,7 @@ class Comparison:
                    tmpHydro = cur_module.get_direct_insideRB_inlets(unit='m3/s')

                    y1.append(tmpHydro[:])
-                elif(curCatch.myModel==cst.tom_2layers_linIF or curCatch.myModel==cst.tom_2layers_UH):
+                else:

                    # tmp = curCatch.retentionBasinDict["J18"].directFluxInRB
                    # tmpHydro = np.zeros(len(tmp))
@@ -664,7 +664,7 @@
                        print("ERROR: the simulation time is not long enough for this subbasin to be taken into account")
                        sys.exit()
                    y1.append(tmpHydro[:])
-                elif(curCatch.myModel==cst.tom_2layers_linIF or curCatch.myModel==cst.tom_2layers_UH):
+                else:
                    tmp = curCatch.retentionBasinDict["J16"].directFluxInRB
                    tmpHydro = np.zeros(len(tmp))

@@ -1588,7 +1588,7 @@
        # The following lines is take the peak difference between simulation and measurements -> Display 0.0 if the measurement is 0.0
        isZero = np.array(meas_peak)==0
        notZero = np.array(meas_peak)!=0
-        peak_prop = {stationKey[i]: [ list( (np.array(meas_peak[i])-np.array(el))/(np.array(meas_peak[i])+isZero[i]) *notZero[i] )
+        peak_prop = {stationKey[i]: [ list( (np.array(el)-np.array(meas_peak[i]))/(np.array(meas_peak[i])+isZero[i]) *notZero[i] )
                        for el in all_peaks[i] ]
                        for i in range(len(stationKey))
                    }
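
Note: the sign of peak_prop is flipped so that a positive value now means the simulated peak exceeds the measured one, which matches the r"$ \frac{Q^{s}_{max}-Q^{m}_{max}}{Q^{m}_{max}} $" axis label used below; a zero measurement still yields 0.0. A small numerical illustration of the formula (made-up values):

    import numpy as np

    meas_peak = np.array([100.0, 0.0])      # measured peaks; the second station has no measurement
    sim_peak  = np.array([120.0, 5.0])      # simulated peaks
    isZero  = meas_peak == 0
    notZero = meas_peak != 0
    rel_diff = (sim_peak - meas_peak) / (meas_peak + isZero) * notZero
    # rel_diff -> array([0.2, 0.0]) : +0.2 means the simulated peak is 20 % above the measurement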
@@ -1640,13 +1640,52 @@
        sorted_keys = list(all_data[0].keys())

        ## Str of dates
-        all_names = ["-".join([cdate[0].strftime("%d/%m/%Y"), cdate[1].strftime("%d/%m/%Y")]) for cdate in intervals]
+        all_names = ["\n - \n".join([cdate[0].strftime("%d/%m/%Y"), cdate[1].strftime("%d/%m/%Y")]) for cdate in intervals]


        ## Plot
        nb_stations = len(stationKey)
        type_of_model = [self.myCatchments[el]["Title"] for el in self.myCatchments]
-        type_of_data = type_of_data = ["Nash", r"$ \frac{Q^{s}_{max}-Q^{m}_{max}}{Q^{m}_{max}} $ "]
-
+        type_of_data = ["Nash", r"$ \frac{Q^{s}_{max}-Q^{m}_{max}}{Q^{m}_{max}} $ "]
+        type_of_data_names = ["Nash", "Exceedance"]
+
        ph.bar_Nash_n_other(all_data, all_colors, nb_x=len(intervals), nb_data=len(type_of_model), nb_lines=nb_stations,
-                            y_titles=type_of_data, x_titles=all_names, nameModel=type_of_model, line_names=sorted_keys, toShow=True)
+                            y_titles=type_of_data, x_titles=all_names, nameModel=type_of_model, line_names=sorted_keys, toShow=False)
+
+        # =========
+        # =========
+        # Plot tables - 2nd version with the table instead of bars
+        all_ns= {
+            cur_catch["Title"]: np.array([list(cur_catch["Object"].get_sub_Nash(measures[i], stationKey[i], intervals)) for i in range(len(stationKey))])
+            for cur_catch in self.myCatchments.values()
+        }
+
+        all_peaks = {cur_catch["Title"]: np.array([cur_catch["Object"].get_sub_peak(stationKey[i], intervals) for i in range(len(stationKey))])
+            for cur_catch in self.myCatchments.values()
+        }
+
+        print(all_peaks)
+
+        meas_peak = np.array([ measures[i].get_peak(intervals)
+            for i in range(len(stationKey)) ])
+
+        # The following lines is take the peak difference between simulation and measurements -> Display 0.0 if the measurement is 0.0
+        isZero = (meas_peak==0)
+        notZero = (meas_peak!=0)
+        peak_prop = {
+            cur_model: (value-meas_peak)/(meas_peak+isZero) *notZero
+            for cur_model, value in all_peaks.items()
+        }
+
+        # Concatenate all data
+        all_data = [all_ns, peak_prop]
+
+        for data, name_of_data in zip(all_data, type_of_data_names):
+            for cur_model, cur_data in data.items():
+                file_name = os.path.join(self.workingDir, name_of_data+"_"+cur_model)+".png"
+                # cur_title = cur_model + ": " + name_of_data
+                ph.table_Nash_n_other(cur_data, name_of_data,
+                                      row_names=sorted_keys, column_names=all_names,
+                                      writeFile=file_name, toShow=False)
+
+        plt.show()
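
Note: with toShow=False on the bar plot, the new table variant writes one PNG per model and per metric, named <workingDir>/Nash_<model>.png and <workingDir>/Exceedance_<model>.png, with what appear to be the station keys as rows and the date intervals as columns; plt.show() at the end then displays all figures at once.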
@@ -2499,7 +2499,7 @@ class Optimisation(wx.Frame):

    # FIXME : this function has been dashed off -> functionnal but not well written!!
    # TODO : to improve !!!!!!
-    def test_equifinality_with_Nash(self, event, idLauncher:int=0, idOpti:int=1, quantile_Nash:float=0.6, std_Nash:float=0.02, clustering_Nash:bool=True):
+    def test_equifinality_with_Nash(self, event, idLauncher:int=0, idOpti:int=1, quantile_Nash:float=0.01, std_Nash:float=0.3, clustering_Nash:bool=True):
        """
        Test the equifinality of the model.

@@ -2578,6 +2578,8 @@
            # Save all the variables/evaluations desired
            frac_dict = self._get_cur_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
            cur_all_frac = list(frac_dict.values())
+            frac_vol_dict = self._get_volume_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
+            qof_max = self._get_max_runoff(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
            p_excess = self._get_exceedance(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
            max_sim_obs = self._get_ratio_max_sim_obs(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
            # Extract the time delays
@@ -2585,13 +2587,18 @@
            all_timeDelays_str = {key : str(datetime.timedelta(seconds=all_timeDelays[key])) for key in all_timeDelays}
            cur_timeDelays = list(all_timeDelays_str.values())
            # Concatenate all the informations
-            cur_all_frac = list(cur_p) + cur_timeDelays + cur_all_frac + [p_excess, max_sim_obs, cur_obj]
+            cur_all_frac = list(cur_p) + cur_timeDelays + cur_all_frac + list(frac_vol_dict.values()) + [qof_max, p_excess, max_sim_obs, cur_obj]
            all_frac.append(cur_all_frac)

            # Get param names
            names = self.get_param_names(idLauncher=idLauncher, stationOut=stationOut)
            # Save the evaluations
-            var_names = names + list(all_timeDelays_str.keys()) + list(frac_dict.keys()) + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
+            var_names = names \
+                + list(all_timeDelays_str.keys()) \
+                + list(frac_dict.keys()) \
+                + list(frac_vol_dict.keys()) \
+                + ["% max runoff", "P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
+
            cur_df = pd.DataFrame(all_frac, columns=var_names)
            # write first the tempory results for each station
            writer_stat = pd.ExcelWriter(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
@@ -2741,11 +2748,39 @@
        curCatch:Catchment = self.myCases[idLauncher].refCatchment
        cur_key = curCatch.get_key_catchmentDict(stationOut)
        curBasin: SubBasin = curCatch.catchmentDict[cur_key]
+        if type(curBasin) != SubBasin:
+            logging.warning("The current module is not a SubBasin object!")
+            return None
        cur_fracts = curBasin.get_summary_fractions(summary="mean", interval=intervals)

        return cur_fracts


+    # TODO : to finish this function
+    def _get_volume_fractions(self, idLauncher:int=0, stationOut:str="",
+                              intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]) -> dict[list[str], list[float]]:
+
+        curCatch:Catchment = self.myCases[idLauncher].refCatchment
+        cur_key = curCatch.get_key_catchmentDict(stationOut)
+        curBasin: SubBasin = curCatch.catchmentDict[cur_key]
+        if type(curBasin) != SubBasin:
+            logging.warning("The current module is not a SubBasin object!")
+            return None
+        cur_fracts = curBasin.get_volume_fractions(interval=intervals)
+        return cur_fracts
+
+    # FIXME : to improve and generalise
+    def _get_max_runoff(self, idLauncher:int=0, stationOut:str="",
+                        intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]) -> dict[list[str], list[float]]:
+
+        curCatch:Catchment = self.myCases[idLauncher].refCatchment
+        cur_key = curCatch.get_key_catchmentDict(stationOut)
+        curBasin: SubBasin = curCatch.catchmentDict[cur_key]
+        cur_fracts = curBasin.get_summary_fractions(summary="max", interval=intervals)
+
+        return cur_fracts["% qof"]
+
+
    def _get_ratio_max_sim_obs(self, idLauncher:int=0, stationOut:str="",
                               intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]) -> float:

@@ -2795,6 +2830,8 @@
    def plot_equifinality(self, event, idLauncher:int=0):

        physical_properties = ["%qof", "%qif", "%qbf", "%loss"]
+        physical_properties_vol = ['% qof volume', '% qif volume', '% qbf volume', '% loss volume']
+        # physical_properties_vol = [el+" volume" for el in physical_properties]
        colors_properties = ["b", "g", "k", "orange"]
        y_label = "Nash"

@@ -2820,6 +2857,19 @@
            ax.set_title("Proportion of rain : "+stationOut)
            ax.legend()
            fig.savefig(os.path.join(self.workingDir, "Equifinality_physical_prop_"+stationOut+".png"))
+            # Plot the physical property volumes
+            fig, ax = plt.subplots()
+            for cur_prop, cur_color in zip(physical_properties_vol, colors_properties):
+                cur_columns = [col for col in df.columns if cur_prop.replace(" ", "") in col.replace(" ", "")]
+                if cur_columns != []:
+                    corr_prop = cur_columns[0]
+                    ax.scatter(df.loc[:,corr_prop], df.loc[:,y_label], s=0.5, c=cur_color,
+                               marker='o', label=cur_prop, alpha=0.4)
+            ax.set_xlabel("% of the rain volume [-]")
+            ax.set_ylabel(y_label+" [-]")
+            ax.set_title("Proportion of rain volume : "+stationOut)
+            ax.legend()
+            fig.savefig(os.path.join(self.workingDir, "Equifinality_physical_prop_volumes_"+stationOut+".png"))
            # Plot the Probability of exceedance
            cur_color = colors_properties[0]
            x_label = "P. of exceedance"
@@ -2841,7 +2891,16 @@
            ax.set_title("Peak analysis : "+stationOut)
            ax.legend()
            fig.savefig(os.path.join(self.workingDir, "Equifinality_peaks_ratio_"+stationOut+".png"))
-
+            # Plot % of the max runoff
+            x_label = "% max runoff"
+            fig, ax = plt.subplots()
+            if x_label in df.columns:
+                ax.scatter(df.loc[:,x_label], df.loc[:,y_label], s=0.5, c=cur_color, marker='o', label=x_label)
+                ax.set_xlabel(x_label +" [-]")
+                ax.set_ylabel(y_label+" [-]")
+                ax.set_title("Max runoff [%] : "+stationOut)
+                ax.legend()
+                fig.savefig(os.path.join(self.workingDir, "Equifinality_max_runoff_"+stationOut+".png"))
        else:
            logging.error("The file "+filename+" does not exist!")