wolfhece 2.2.8__py3-none-any.whl → 2.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -27,6 +27,8 @@ from ..wolf_array import *
 from ..PyGui import GenMapManager,HydrologyModel
 from . import cst_exchanges as cste
 from . import constant as cst
+from . import Models_characteristics as mc
+from . import Internal_variables as iv
 from ..PyTranslate import _
 import traceback
 
@@ -228,7 +230,12 @@ class Optimisation(wx.Frame):
         self.Bind(wx.EVT_MENU, self.test_equifinality_with_Nash, testEquiFinClick)
         plotEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Plot equifinality with Nash')
         self.Bind(wx.EVT_MENU, self.plot_equifinality, plotEquiFinClick)
-
+        testEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Models analysis with Nash')
+        self.Bind(wx.EVT_MENU, self.launch_models_propertie_with_Nash, testEquiFinClick)
+        plotEquiFinClick = toolMenu.Append(wx.ID_ANY, 'Plot analysis with Nash')
+        self.Bind(wx.EVT_MENU, self.plot_model_analysis, plotEquiFinClick)
+
+
 
         # Creation of the Launch Menu
         launchMenu = wx.Menu()
@@ -1990,24 +1997,31 @@ class Optimisation(wx.Frame):
                 if cur_sub.iDSorted != refCatch.myEffSortSubBasins[cur_effsub]:
                     continue
                 self.myParams[i+1]["value"] = params[i]
-                if "Convertion Factor" in myModelDict[int(myType)]:
-                    convFact = myModelDict[int(myType)]["Convertion Factor"]
-                else:
-                    convFact = 1.0
+
                 all_files = myModelDict[int(myType)]["File"]
                 if type(all_files) is not list:
+                    # Extract the unit conversion factor
+                    if "Convertion Factor" in myModelDict[int(myType)]:
+                        convFact = myModelDict[int(myType)]["Convertion Factor"]
+                    else:
+                        convFact = 1.0
                     fileName = myModelDict[int(myType)]["File"]
                     myGroup = myModelDict[int(myType)]["Group"]
                     myKey = myModelDict[int(myType)]["Key"]
                     self.write_one_opti_param(filePath, fileName, myGroup, myKey, params[i], convers_factor=convFact)
                 else:
+                    # Extract the unit conversion factors in a list of the same size as the list of files
+                    if "Convertion Factor" in myModelDict[int(myType)]:
+                        convFact = myModelDict[int(myType)]["Convertion Factor"]
+                    else:
+                        convFact = [1.0]*len(all_files)
+                    # Iterate over all the files to fill for one parameter
                     for iFile in range(len(all_files)):
                         fileName = all_files[iFile]
                         myGroup = myModelDict[int(myType)]["Group"][iFile]
                         myKey = myModelDict[int(myType)]["Key"][iFile]
-                        self.write_one_opti_param(filePath, fileName, myGroup, myKey, params[i], convers_factor=convFact)
-            else:
-
+                        self.write_one_opti_param(filePath, fileName, myGroup, myKey, params[i], convers_factor=convFact[iFile])
+            else:
                 self.curParams_vec_F[i] = params[i]
                 self.update_timeDelay(i+1)
                 refCatch.save_timeDelays([self.myParams[i+1]["junction_name"]])
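
The change above moves the "Convertion Factor" lookup (key spelling as in the package) inside each branch so that multi-file parameters can carry one factor per file. A minimal standalone sketch of that idea — the dictionary below is hypothetical, shaped like the NAM entry further down in this diff, and it assumes the factor multiplies the calibrated value before writing:

    # Hypothetical parameter definition, mirroring the per-file pattern above.
    param_def = {
        "File": ["simul_OF.param", "simul_GF.param"],     # assumed file names
        "Group": ["Time Parameters", "NAM parameters"],
        "Key": ["Lagtime", "Lagtime"],
        "Convertion Factor": [1/3600.0, 1.0],             # [sec] -> [h] for the first file only
    }

    value = 7200.0  # calibrated value, here in seconds
    files = param_def["File"]
    # Fall back to neutral factors when the key is absent, as in the code above
    conv = param_def.get("Convertion Factor", [1.0] * len(files))
    for i_file, file_name in enumerate(files):
        # assuming the factor multiplies the value before it is written
        print(f"{file_name}: {param_def['Group'][i_file]} / {param_def['Key'][i_file]}"
              f" <- {value * conv[i_file]}")
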
@@ -2662,10 +2676,10 @@ class Optimisation(wx.Frame):
         logging.info("The equifinality test is finished!")
 
 
-    def get_best_params(self, stationOut:str,
-                        criterion:str="Nash", quantile:float=0.99, std:float=0.05, eps:float=0.1,
-                        objective_fct:bool= True, apply_clustering:bool=False):
-        from sklearn.cluster import DBSCAN
+    def get_best_params(self, stationOut:str,
+                        criterion:str="Nash", quantile:float=0.99, std:float=0.05, eps:float=0.2, rmv_near_max=1e-4, nb_rand_close:int=10,
+                        objective_fct:bool= True, apply_clustering:bool=False, objective_weight:float=1.0):
+        from sklearn.cluster import DBSCAN
         """
         Get the best parameters for a given station.
 
@@ -2685,12 +2699,26 @@ class Optimisation(wx.Frame):
 
         quantile_cond = (all_obj_fct > np.quantile(all_obj_fct, quantile))
         std_cond = (all_obj_fct > best_objfct*(1-std))
-        all_cond = np.where(np.logical_and(quantile_cond, std_cond))[0]
+        tooclose_cond = (all_obj_fct < best_objfct*(1-rmv_near_max)) | (all_obj_fct == best_objfct)
+        all_cond = np.where(quantile_cond & std_cond & tooclose_cond)[0]
         eff_params = all_params[all_cond]
         eff_obj = all_obj_fct[all_cond]
 
         if objective_fct:
             eff_params = np.column_stack((eff_params, eff_obj))
+        # Randomly re-add a few of the parameter sets that are close to the best one
+        if nb_rand_close>0:
+            close_params = all_params[~tooclose_cond]
+            if np.shape(close_params)[0]>0:
+                close_obj = all_obj_fct[~tooclose_cond]
+                # random selection of the parameters that are close to the best one
+                idx = np.random.choice(np.shape(close_params)[0], size=nb_rand_close, replace=False)
+                selected_params = close_params[idx]
+                selected_obj = close_obj[idx]
+                tot_add_params = np.column_stack((selected_params, selected_obj))
+                # Add the selected parameters to eff_params
+                eff_params = np.vstack((eff_params, tot_add_params))
 
         # In this part we filter and remove the parameters that are almost equivalent
         # To do so, we use the DBSCAN clustering algorithm to group the parameters that are close to each other
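
A runnable sketch of the new selection logic on synthetic data, using the defaults from the signature above (`quantile=0.99`, `std=0.05`, `rmv_near_max=1e-4`):

    import numpy as np

    rng = np.random.default_rng(0)
    all_obj_fct = rng.uniform(0.2, 0.95, size=1000)          # synthetic Nash values
    best_objfct = np.max(all_obj_fct)
    quantile, std, rmv_near_max = 0.99, 0.05, 1e-4

    quantile_cond = all_obj_fct > np.quantile(all_obj_fct, quantile)
    std_cond = all_obj_fct > best_objfct * (1 - std)
    # Keep the best run itself, but drop runs numerically indistinguishable from it
    tooclose_cond = (all_obj_fct < best_objfct * (1 - rmv_near_max)) | (all_obj_fct == best_objfct)

    keep = np.where(quantile_cond & std_cond & tooclose_cond)[0]
    print(len(keep), "near-optimal but distinct runs retained")
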
@@ -2701,6 +2729,10 @@ class Optimisation(wx.Frame):
             min_param = np.min(eff_params, axis=0)
             max_param = np.max(eff_params, axis=0)
             norm_params = (eff_params-min_param)/(max_param-min_param)
+            # Add weight to the objective function to make it more important in the clustering
+            # FIXME : to be improved
+            norm_params[:,-1] = norm_params[:,-1]*objective_weight
+            # Apply the DBSCAN clustering algorithm to group the parameters
             db = DBSCAN(eps=eps).fit(norm_params)
             labels = db.labels_
             # Extraction of the number of groups and particular cases
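
For reference, a runnable sketch of the clustering step: min-max normalisation, objective-weighting of the last column, then scikit-learn's DBSCAN. The keep-the-best-per-cluster rule at the end is an assumption for illustration, not necessarily what the package does with the labels:

    import numpy as np
    from sklearn.cluster import DBSCAN

    rng = np.random.default_rng(1)
    # Synthetic (n_sets, n_params + 1) matrix; the last column plays the role of the objective
    eff_params = np.vstack([rng.normal(loc, 0.01, size=(20, 4)) for loc in (0.2, 0.5, 0.8)])

    # Min-max normalisation so that eps acts on comparable scales
    mn, mx = eff_params.min(axis=0), eff_params.max(axis=0)
    norm_params = (eff_params - mn) / (mx - mn)
    norm_params[:, -1] *= 1.0   # objective_weight

    labels = DBSCAN(eps=0.2).fit(norm_params).labels_
    # One representative per cluster: the member with the best objective (label -1 = noise)
    reps = [eff_params[labels == lab][eff_params[labels == lab][:, -1].argmax()]
            for lab in set(labels) if lab != -1]
    print(f"{len(reps)} cluster representatives, {np.sum(labels == -1)} noise points")
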
@@ -2800,6 +2832,35 @@ class Optimisation(wx.Frame):
             return None
         cur_fracts = curBasin.get_volume_fractions(interval=intervals)
         return cur_fracts
+
+    def _get_flow_fractions(self, idLauncher:int=0, stationOut:str="",
+                            intervals:list[tuple[datetime.datetime, datetime.datetime]]=[]) -> dict[str, float]:
+
+        curCatch:Catchment = self.myCases[idLauncher].refCatchment
+        cur_key = curCatch.get_key_catchmentDict(stationOut)
+        curBasin: SubBasin = curCatch.catchmentDict[cur_key]
+        if type(curBasin) != SubBasin:
+            logging.warning("The current module is not a SubBasin object!")
+            return None
+        cur_fracts = curBasin.get_flow_fractions(interval=intervals, summary="mean")
+        return cur_fracts
+
+
+    def _get_punctual_reservoir_fractions(self, eval_date:datetime.datetime,
+                                          idLauncher:int=0, stationOut:str="") -> dict[str, float]:
+
+        curCatch:Catchment = self.myCases[idLauncher].refCatchment
+        cur_key = curCatch.get_key_catchmentDict(stationOut)
+        curBasin: SubBasin = curCatch.catchmentDict[cur_key]
+        if type(curBasin) != SubBasin:
+            logging.warning("The current module is not a SubBasin object!")
+            return None
+        linked_params = mc.MODELS_VAR[curBasin.model].get_all_linked_params()
+        i_params = self._get_key_from_type_all_parameters(list(linked_params.values()))
+        max_params = {var_name: self.myParams[i_params[param_id]]["value"] for var_name, param_id in linked_params.items()}
+        cur_fracts = curBasin.get_iv_fractions_one_date(max_params=max_params, eval_date=eval_date)
+        return cur_fracts
+
 
     # FIXME : to improve and generalise
     def _get_max_runoff(self, idLauncher:int=0, stationOut:str="",
@@ -2855,7 +2916,6 @@ class Optimisation(wx.Frame):
         for i in range(nbInlets):
             names.append("TimeDelay "+inletsNames[i])
 
-
         return names
 
     # Plot the equifinality test for each station
@@ -2938,6 +2998,63 @@ class Optimisation(wx.Frame):
 
         plt.show()
 
+    # Plot the model-analysis results for each station
+    def plot_model_analysis(self, event, idLauncher:int=0):
+
+        physical_properties = ["%q_of", "%q_if", "%q_bf"]
+        # physical_properties_vol = [el+" volume" for el in physical_properties]
+        colors_properties = ["b", "g", "k"]
+        y_label = "Nash"
+
+        if self.myStations==[]:
+            self.set_compare_stations(idLauncher=idLauncher)
+        sortJct = self.myStations
+
+        for iOpti in range(len(sortJct)):
+            stationOut = sortJct[iOpti]
+            filename = os.path.join(self.workingDir, stationOut+"_tests.xlsx")
+            if os.path.isfile(filename):
+                df = pd.read_excel(filename, sheet_name=stationOut)
+                # Plot the physical properties
+                fig, ax = plt.subplots()
+                for cur_prop, cur_color in zip(physical_properties, colors_properties):
+                    cur_columns = [col for col in df.columns if cur_prop in col.replace(" ", "")]
+                    if cur_columns != []:
+                        corr_prop = cur_columns[0]
+                        ax.scatter(df.loc[:,corr_prop], df.loc[:,y_label], s=0.5, c=cur_color,
+                                   marker='o', label=cur_prop, alpha=0.4)
+                ax.set_xlabel("% of the rain [-]")
+                ax.set_ylabel(y_label+" [-]")
+                ax.set_title("Proportion of rain : "+stationOut)
+                ax.legend()
+                fig.savefig(os.path.join(self.workingDir, "Equifinality_physical_prop_"+stationOut+".png"))
+                # Plot the probability of exceedance
+                cur_color = colors_properties[0]
+                x_label = "P. of exceedance"
+                fig, ax = plt.subplots()
+                if x_label in df.columns:
+                    ax.scatter(df.loc[:,x_label], df.loc[:,y_label], s=0.5, c=cur_color, marker='o', label=x_label)
+                ax.set_xlabel(x_label +" [-]")
+                ax.set_ylabel(y_label+" [-]")
+                ax.set_title("Probability of Q_sim > Q_meas : "+stationOut)
+                ax.legend()
+                fig.savefig(os.path.join(self.workingDir, "Equifinality_prob_excess_"+stationOut+".png"))
+                # Plot Q_sim/Q_max
+                x_label = "Qmax_simul/Q_max_measure"
+                fig, ax = plt.subplots()
+                if x_label in df.columns:
+                    ax.scatter(df.loc[:,x_label], df.loc[:,y_label], s=0.5, c=cur_color, marker='o', label=x_label)
+                ax.set_xlabel(x_label +" [-]")
+                ax.set_ylabel(y_label+" [-]")
+                ax.set_title("Peak analysis : "+stationOut)
+                ax.legend()
+                fig.savefig(os.path.join(self.workingDir, "Equifinality_peaks_ratio_"+stationOut+".png"))
+
+            else:
+                logging.error("The file "+filename+" does not exist!")
+
+        plt.show()
+
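
The column lookup in `plot_model_analysis` strips blanks before matching, so header variants such as "% q_of" and "%q_of" resolve to the same property. A tiny illustration with hypothetical spreadsheet headers:

    df_columns = ["% q_of [-]", "%q_if", "Nash"]   # hypothetical headers
    cur_prop = "%q_of"
    matches = [col for col in df_columns if cur_prop in col.replace(" ", "")]
    print(matches)   # -> ['% q_of [-]']
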
 
     def add_Case(self, idLauncher:int=0):
 
@@ -3093,6 +3210,169 @@ class Optimisation(wx.Frame):
         tmpWolf = None
 
 
+    # FIXME : this function has been dashed off -> functional but not well written!!
+    # TODO : to improve !!!!!!
+    def launch_models_propertie_with_Nash(self, event, idLauncher:int=0, idOpti:int=1, quantile_Nash:float=0.01, std_Nash:float=0.03, clustering_Nash:bool=True,
+                                          save_every:int=100, restart_from_file:bool=True):
+        """
+        Analyse the properties of the model and compare them with the Nash coefficient.
+
+        Args:
+            idLauncher (int, optional): The id of the launcher. Defaults to 0.
+
+        Returns:
+            None
+
+        Raises:
+            None
+        """
+        curCatch:Catchment = self.myCases[idLauncher].refCatchment
+
+        onlyOwnSub = self.optiParam.get_param("Semi-Distributed", "Own_SubBasin")
+        if onlyOwnSub is None:
+            onlyOwnSub = False
+        doneList = []
+        previousLevel = 1
+        # Collect, sort and save the compare stations
+        self.set_compare_stations(idLauncher=idLauncher)
+        sortJct = self.myStations
+        # Get the initial number of intervals
+        # -> these can evolve according to the measurements available at each station
+        is_ok = self._save_opti_intervals()
+        all_intervals = self.all_intervals
+        # Activate the writing of the internal variables
+        curCatch.activate_all_internal_variables()
+        # Prepare the Excel writer
+        writer_tot = pd.ExcelWriter(os.path.join(self.workingDir, "all_best_tests.xlsx"), engine = 'xlsxwriter')
+
+        for iOpti in range(len(sortJct)):
+            stationOut = sortJct[iOpti]
+            logging.info("==================")
+            logging.info("Station : "+stationOut)
+            # Build the current compare.txt file and replace all nan values by 0.0
+            self.save_current_compare_file(stationOut=stationOut)
+            # Save the name of the station that will be the output
+            curCatch.define_station_out(stationOut)
+            # Activate all the useful subs and write them in the param file
+            curCatch.activate_usefulSubs(blockJunction=doneList, onlyItself=onlyOwnSub)
+            # Select the correct calibration intervals -> remove the intervals with NaN
+            cur_intervals = self.select_opti_intervals(all_intervals=all_intervals, stationOut=stationOut, filter_nan=True)
+            self.save_opti_dates_to_file(cur_intervals)
+            # Rename the result file
+            self.optiParam.change_param("Optimizer", "fname", stationOut)
+            self.optiParam.SavetoFile(None)
+            self.optiParam.Reload(None)
+            self.update_myParams(idLauncher)
+            # Prepare the paramPy dictionary before calibration
+            self.prepare_calibration_timeDelay(stationOut=stationOut)
+            # Reload the useful modules
+            self.reload_hydro(idCompar=0, fromStation=stationOut, lastLevel=previousLevel, updateAll=True)
+            ## =======
+            ## Init
+            ## =======
+            self.init_optimizer(idOpti)
+            self.associate_ptr(None, idOpti=idOpti)
+            # Get the best parameters to test
+            all_params = self.get_best_params(stationOut=stationOut, quantile=quantile_Nash, std=std_Nash, rmv_near_max=1e-4, apply_clustering=clustering_Nash)
+            ## =======
+            ## Compute
+            ## =======
+            all_frac = []
+            # Check whether the Excel file already exists and, if so, load it to see which parameters have already been tested
+            if restart_from_file:
+                all_frac, all_params = self._reload_model_analysis(stationOut=stationOut, all_params=all_params)
+            # Get param names
+            names = self.get_param_names(idLauncher=idLauncher, stationOut=stationOut)
+            logging.info("The number of parameter sets to test is : "+str(len(all_params)))
+            for i in tqdm(range(len(all_params))):
+                cur_p = all_params[i, :-1]
+                cur_obj = all_params[i, -1]
+                cur_obj2 = self.evaluate_model_optimizer(cur_p, idOpti=idOpti)
+                print("cur_obj : ", cur_obj, " ; cur_obj2 : ", cur_obj2)
+                if cur_obj != cur_obj2:
+                    logging.error("The objective function is not the same as the one computed by the model!")
+                    logging.error("cur_obj : "+str(cur_obj)+" ; cur_obj2 : "+str(cur_obj2))
+                    # assert cur_obj == cur_obj2, "The objective function is not the same as the one computed by the model!"
+                self.write_mesh_results_optimizer(idOpti=idOpti)
+                # Save all the variables/evaluations desired
+                frac_flow_dict = self._get_flow_fractions(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
+                init_iv = self._get_punctual_reservoir_fractions(eval_date=cur_intervals[0][0], idLauncher=idLauncher, stationOut=stationOut)
+                p_excess = self._get_exceedance(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
+                max_sim_obs = self._get_ratio_max_sim_obs(idLauncher=idLauncher, stationOut=stationOut, intervals=cur_intervals)
+                # Extract the time delays
+                all_timeDelays = curCatch.get_timeDelays_inlets(ref=stationOut)
+                all_timeDelays_str = {key : str(datetime.timedelta(seconds=all_timeDelays[key])) for key in all_timeDelays}
+                cur_timeDelays = list(all_timeDelays_str.values())
+                # Concatenate all the information
+                cur_all_frac = (list(cur_p)
+                                + cur_timeDelays
+                                + list(frac_flow_dict.values())
+                                + list(init_iv.values())
+                                + [p_excess, max_sim_obs, cur_obj])
+                all_frac.append(cur_all_frac)
+                # Periodically save the evaluations in case of trouble
+                if (i + 1) % save_every == 0:
+                    # Save the evaluations
+                    var_names = names \
+                                + list(all_timeDelays_str.keys()) \
+                                + list(frac_flow_dict.keys()) \
+                                + list(init_iv.keys()) \
+                                + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
+                    cur_df = pd.DataFrame(all_frac, columns=var_names)
+                    # First write the temporary results for each station
+                    writer_stat = pd.ExcelWriter(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
+                    cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
+                    writer_stat.sheets[stationOut].autofit()
+                    writer_stat.close()
+
+            # Save the evaluations
+            var_names = names \
+                        + list(all_timeDelays_str.keys()) \
+                        + list(frac_flow_dict.keys()) \
+                        + list(init_iv.keys()) \
+                        + ["P. of exceedance", "Qmax_simul/Q_max_measure", "Nash"]
+            cur_df = pd.DataFrame(all_frac, columns=var_names)
+            # First write the temporary results for each station
+            writer_stat = pd.ExcelWriter(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), engine = 'xlsxwriter')
+            cur_df.to_excel(writer_stat, sheet_name=stationOut, columns=var_names)
+            writer_stat.sheets[stationOut].autofit()
+            writer_stat.close()
+            # Now write the information for all the stations into the same Excel file
+            cur_df.to_excel(writer_tot, sheet_name=stationOut, columns=var_names)
+            writer_tot.sheets[stationOut].autofit()
+
+            ## =======
+            ## =======
+            # Collect the best parameters and their objective function(s)
+            best_params = self.apply_optim(None)
+            # Simulation with the best parameters
+            self.compute_distributed_hydro_model()
+            # Update myHydro of all effective subbasins to get the best configuration upstream
+            curCatch.read_hydro_eff_subBasin()
+            # Update timeDelays according to time wolf_array
+            self.apply_timeDelay_dist(idOpti=idOpti, idLauncher=idLauncher, junctionKey=stationOut)
+            # Update the outflows
+            curCatch.update_hydro(idCompar=0)
+
+            # All upstream elements of a reference will be fixed
+            doneList.append(stationOut)
+            previousLevel = curCatch.levelOut
+
+        writer_tot.close()
+        logging.info("The equifinality test is finished!")
+
+    # FIXME : it might be better to pass myParams to the CaseOpti object instead, to allow parallelisation
+    def _build_type_to_key_index(self) -> dict[int, int]:
+        return {param["type"]: i for i, param in self.myParams.items()}
+
+    def _get_key_from_type_all_parameters(self, list_type_param: list[int]) -> dict[int, int | None]:
+        type_to_key = self._build_type_to_key_index()
+        return {cur_key: type_to_key.get(cur_key) for cur_key in list_type_param}
+
+    def _get_key_from_type_parameter(self, type_param:int) -> int | None:
+        return next((i for i, param in self.myParams.items() if param["type"] == type_param), None)
+
+
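
These lookup helpers invert `myParams` so that a model-specific parameter type can be resolved to its calibration index. A standalone sketch (the `myParams` layout below is hypothetical, inferred from the usage above):

    # Hypothetical myParams layout: {calibration index: {"type": ..., "value": ...}}
    myParams = {
        1: {"type": 12, "value": 0.4},
        2: {"type": 7,  "value": 150.0},
        3: {"type": 31, "value": 2.5},
    }

    def build_type_to_key_index(params):
        # Invert the mapping: parameter type identifier -> calibration index
        return {p["type"]: i for i, p in params.items()}

    def get_key_from_type_all_parameters(params, list_type_param):
        type_to_key = build_type_to_key_index(params)
        # .get() yields None for types that are not calibrated here
        return {t: type_to_key.get(t) for t in list_type_param}

    print(get_key_from_type_all_parameters(myParams, [7, 31, 99]))
    # -> {7: 2, 31: 3, 99: None}
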
 
     def make_nd_array(self, c_pointer, shape, dtype=np.float64, order='C', own_data=True,readonly=False):
         arr_size = np.prod(shape[:]) * np.dtype(dtype).itemsize
 
@@ -3109,3 +3389,33 @@ class Optimisation(wx.Frame):
             return arr.copy()
         else:
             return arr
+
+    def _reload_model_analysis(self, stationOut:str, all_params:np.ndarray):
+        """
+        Reload the model analysis for a given station.
+
+        Args:
+            stationOut (str): The name of the station.
+            all_params (np.ndarray): The parameter sets to be tested (last column = objective value).
+
+        Returns:
+            tuple: the evaluations already saved for this station and the parameter sets that remain to be tested.
+
+        Raises:
+            None
+        """
+        # Check whether the Excel file already exists and load it to see which parameters have already been tested
+        filename = os.path.join(self.workingDir, stationOut+"_tests.xlsx")
+        # just_params = all_params[:, :-1]
+        nb_params = np.shape(all_params)[1] - 1
+        if os.path.isfile(filename):
+            df = pd.read_excel(os.path.join(self.workingDir, stationOut+"_tests.xlsx"), sheet_name=stationOut)
+            # Extract all the values of the dataframe in a list
+            all_data_tested = df.iloc[:, 1:].values.tolist()
+            # Extract the parameter columns of the dataframe in a numpy array
+            all_params_tested = df.iloc[:, 1:nb_params+1].values
+            # Remove the parameters that have already been tested
+            new_params = np.array([el for el in all_params if ~np.any(np.all(np.isclose(all_params_tested, el[:-1], atol=1e-6), axis=1))])
+            return all_data_tested, new_params
+
+        return [], all_params
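
The row-matching trick in `_reload_model_analysis` compares each candidate parameter set (without its objective column) against every already-tested row with `np.isclose`. A minimal sketch on synthetic arrays:

    import numpy as np

    all_params = np.array([[0.10, 2.0, 0.95],
                           [0.30, 1.5, 0.90],
                           [0.50, 1.0, 0.88]])    # last column = objective value
    all_params_tested = np.array([[0.10, 2.0],
                                  [0.50, 1.0]])   # parameter columns of previous runs

    # Keep only the rows whose parameter part matches no previously tested row
    new_params = np.array([el for el in all_params
                           if not np.any(np.all(np.isclose(all_params_tested, el[:-1],
                                                           atol=1e-6), axis=1))])
    print(new_params)   # -> [[0.3  1.5  0.9]]
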
@@ -34,6 +34,8 @@ from . import plot_hydrology as ph
 from . import data_treatment as datt
 from . import read as rd
 from . import constant as cst
+from . import Models_characteristics as mc
+from . import Internal_variables as iv
 
 from ..wolf_array import *
 from ..PyParams import*
@@ -3291,12 +3293,16 @@ class SubBasin:
         """
         This procedure activates all internal variables of all the hydrological modules.
         """
-        if self.model == cst.tom_VHM:
-            self.activate_all_iv_VHM()
-        elif self.model == cst.tom_GR4:
-            self.activate_all_iv_GR4()
-        elif self.model == cst.tom_2layers_linIF:
-            self.activate_all_iv_2layers()
+        # if self.model == cst.tom_VHM:
+        #     self.activate_all_iv_VHM()
+        # elif self.model == cst.tom_GR4:
+        #     self.activate_all_iv_GR4()
+        # elif self.model == cst.tom_2layers_linIF:
+        #     self.activate_all_iv_2layers()
+        cur_dir = os.path.join(self.fileNameRead, "Subbasin_"+str(self.iDSorted))
+
+        mc.MODELS_VAR[self.model].activate_all(directory=cur_dir, prefix_file='simul', type_of_var=iv.FINAL_OUT_VAR)
+        mc.MODELS_VAR[self.model].activate_all(directory=cur_dir, prefix_file='simul', type_of_var=iv.IV_VAR)
 
 
     def collect_x_VHM(self) -> dict[str,np.array]:
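
The replacement of the if/elif chain by `mc.MODELS_VAR[self.model]` is a registry-dispatch pattern: one dictionary maps each model identifier to an object that knows that model's variables. A generic sketch of the idea with invented names (not the package's actual `Models_characteristics` API):

    # All names below are invented for illustration.
    class ModelVariables:
        def __init__(self, name: str, variables: list[str]):
            self.name = name
            self.variables = variables

        def activate_all(self, directory: str, prefix_file: str) -> None:
            # One activation per variable of this model, replacing per-model if/elif chains
            for var in self.variables:
                print(f"activate {var} in {directory}/{prefix_file}")

    MODELS_VAR = {
        "VHM": ModelVariables("VHM", ["q_of", "q_if", "q_bf"]),
        "GR4": ModelVariables("GR4", ["prod_store", "routing_store"]),
    }

    model = "GR4"
    MODELS_VAR[model].activate_all(directory="Subbasin_3", prefix_file="simul")
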
@@ -3559,29 +3565,8 @@ class SubBasin:
 
         if all_f == {}:
             all_f = self.collect_fractions()
-
-        if interval is not None:
-            interv = np.zeros(len(self.time), dtype=bool)
-            for el in interval:
-                date_i = datetime.datetime.timestamp(el[0])
-                date_f = datetime.datetime.timestamp(el[1])
-                interv += (self.time>=date_i) & (self.time<=date_f)
-        else:
-            interv = np.ones(len(self.time), dtype=bool)
 
-        if summary == "mean":
-            return {key: np.nanmean(all_f[key], where=interv) for key in all_f}
-        elif summary == "median":
-            return {key: np.nanmedian(all_f[key][interv]) for key in all_f}
-        elif summary == "std":
-            return {key: np.nanstd(all_f[key][interv]) for key in all_f}
-        elif summary == "min":
-            return {key: np.nanmin(all_f[key], where=interv) for key in all_f}
-        elif summary == "max":
-            return {key: np.nanmax(all_f[key], where=interv, initial=0.0) for key in all_f}
-        else:
-            logging.error("The summary type is not recognised!")
-            return []
+        return self._operation_on_ts(all_f, summary=summary, interval=interval)
 
 
     def get_volume_fractions(self, all_f:dict={},
@@ -3717,7 +3702,80 @@ class SubBasin:
             unit='mm/h'
 
         return self.get_outFlow(unit=unit)
+
+
+    def get_flow_fractions(self, all_f:dict={}, summary:str=None,
+                           interval:list[tuple[datetime.datetime, datetime.datetime]]=None) -> dict[str, np.ndarray|float]:
+        """
+        Return the component flows of the current module as fractions of the simulated outflow.
+
+        Parameters:
+        - all_f (dict, optional): Pre-collected flow time series. If empty, they are read from the internal-variable files.
+        - summary (str, optional): The type of summary to return. If None, the full time series are returned.
+        - interval (list[tuple[datetime.datetime, datetime.datetime]], optional): The intervals of time to consider. Default is None.
+
+        Returns:
+        - dict: A dictionary containing the flow fractions of the current module, in percent.
+        """
+
+        if all_f == {}:
+            cur_dir = os.path.join(self.fileNameRead, "Subbasin_"+str(self.iDSorted))
+            all_qin = mc.MODELS_VAR[self.model].get_all_iv_timeseries(directory=cur_dir,
+                                                                      prefix_file='simul', type_of_var=iv.FINAL_OUT_VAR)
+            all_f = mc.MODELS_VAR[self.model].get_all_iv_timeseries(directory=cur_dir,
+                                                                    prefix_file='simul', type_of_var=iv.DEFAULT_VAR)
+            all_f.update(all_qin)
+
+        q_simul = self.get_outFlow(unit='mm/h')
+        all_r = {"%"+key: val/q_simul * 100.0 for key, val in all_f.items()}
+
+        return self._operation_on_ts(all_r, summary=summary, interval=interval)
+
+
+    def get_iv(self, all_iv:dict={}, max_params:dict={}, summary:str=None,
+               interval:list[tuple[datetime.datetime, datetime.datetime]]=None) -> dict[str, np.ndarray]:
+        """
+        Return the internal variables of the current module, optionally normalised by their maximal values.
+
+        Parameters:
+        - all_iv (dict, optional): Pre-collected internal-variable time series. If empty, they are read from files.
+        - max_params (dict, optional): Maximal value per variable, used for normalisation. If empty, the raw series are returned.
+        - summary (str, optional): The type of summary to return. If None, the full time series are returned.
+        - interval (list[tuple[datetime.datetime, datetime.datetime]], optional): The intervals of time to consider. Default is None.
+
+        Returns:
+        - dict: A dictionary containing the (normalised) internal variables of the current module.
+        """
+
+        if all_iv == {}:
+            cur_dir = os.path.join(self.fileNameRead, "Subbasin_"+str(self.iDSorted))
+            all_iv = mc.MODELS_VAR[self.model].get_all_iv_timeseries(directory=cur_dir,
+                                                                     prefix_file='simul', type_of_var=iv.IV_VAR)
+        if max_params != {}:
+            out_dict = {key: all_iv[key]/cur_max for key, cur_max in max_params.items()}
+        else:
+            out_dict = all_iv
 
+        return self._operation_on_ts(out_dict, summary=summary, interval=interval)
+
+
+    def get_iv_fractions_one_date(self, all_iv:dict={}, max_params:dict={}, eval_date:datetime.datetime=None) -> dict[str, float]:
+        """
+        Return the filling fraction of each internal variable at a single date.
+
+        Parameters:
+        - all_iv (dict, optional): Pre-collected internal-variable time series. If empty, they are read from files.
+        - max_params (dict, optional): Maximal value per variable, used for normalisation.
+        - eval_date (datetime.datetime): The date at which the fractions are evaluated.
+
+        Returns:
+        - dict: A dictionary containing the fraction of each internal variable at the given date.
+        """
+
+        all_iv = self.get_iv(all_iv=all_iv, max_params=max_params, summary=None)
+        t_eval = datetime.datetime.timestamp(eval_date)
+        eval_i = np.searchsorted(self.time, t_eval)
+        if self.time[eval_i] != t_eval:
+            logging.warning("The date is not in the time series!")
+            return {}
+
+        return {"% "+key: val[eval_i] for key, val in all_iv.items()}
 
 
     def import_from_pandas_Series(self, data:pd.Series, which="outFlow"):
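
A self-contained sketch of the fraction computation in `get_flow_fractions`, on synthetic series: each component flow is divided by the simulated outflow, expressed in percent, then summarised:

    import numpy as np

    t = np.arange(100, dtype=float)
    q_of = np.maximum(np.sin(t / 10.0), 0.0)      # overland flow [mm/h], synthetic
    q_if = np.full_like(t, 0.3)                   # interflow
    q_bf = np.full_like(t, 0.1)                   # baseflow
    q_simul = q_of + q_if + q_bf                  # simulated outflow

    all_f = {"q_of": q_of, "q_if": q_if, "q_bf": q_bf}
    all_r = {"%" + key: val / q_simul * 100.0 for key, val in all_f.items()}

    # "mean" summary, as requested by _get_flow_fractions above
    print({key: round(float(np.nanmean(val)), 1) for key, val in all_r.items()})
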
@@ -3760,7 +3818,33 @@ class SubBasin:
         tserie = pd.Series(data, index=idx, copy=True, name=" ".join([self.name,which]))
 
         return tserie
+
+
+    def _operation_on_ts(self, ts:dict[str, np.ndarray], summary:str=None, interval:list[tuple[datetime.datetime, datetime.datetime]]=None):
+        if interval is not None:
+            interv = np.zeros(len(self.time), dtype=bool)
+            for el in interval:
+                date_i = datetime.datetime.timestamp(el[0])
+                date_f = datetime.datetime.timestamp(el[1])
+                interv += (self.time>=date_i) & (self.time<=date_f)
+        else:
+            interv = np.ones(len(self.time), dtype=bool)
 
+        if summary is None:
+            return {key: ts[key][interv] for key in ts}
+        elif summary == "mean":
+            return {key: np.nanmean(ts[key], where=interv) for key in ts}
+        elif summary == "median":
+            return {key: np.nanmedian(ts[key][interv]) for key in ts}
+        elif summary == "std":
+            return {key: np.nanstd(ts[key][interv]) for key in ts}
+        elif summary == "min":
+            return {key: np.nanmin(ts[key], where=interv, initial=np.inf) for key in ts}
+        elif summary == "max":
+            return {key: np.nanmax(ts[key], where=interv, initial=0.0) for key in ts}
+        else:
+            logging.error("The summary type is not recognised!")
+            return {}
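
A runnable sketch of the masking-plus-summary pattern in `_operation_on_ts` (synthetic epoch-second time axis; note that `np.nanmean`/`np.nanmax` accept a boolean `where` mask from NumPy 1.22 onwards, and the masked min/max reductions need an `initial` value):

    import datetime
    import numpy as np

    time = np.arange(0, 72 * 3600, 3600, dtype=float)   # 72 hourly steps [s since epoch]
    ts = {"%q_of": np.random.default_rng(2).uniform(0.0, 60.0, len(time))}

    # One interval covering the first simulated day
    interval = [(datetime.datetime.fromtimestamp(0), datetime.datetime.fromtimestamp(24 * 3600))]
    mask = np.zeros(len(time), dtype=bool)
    for t0, t1 in interval:
        mask |= (time >= t0.timestamp()) & (time <= t1.timestamp())

    print(np.nanmean(ts["%q_of"], where=mask))               # mean over the first day only
    print(np.nanmax(ts["%q_of"], where=mask, initial=0.0))   # max over the same window
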
 
     # def plot_Nash_vs_Qexcess(self, figure:plt.axis=None, toShow:bool=False, writeFile:str=""):
 
@@ -555,6 +555,7 @@ NAM["Parameters"][exchange_parameters_NAM_CK12]["File"] = ["simul_OF.param", "si
 NAM["Parameters"][exchange_parameters_NAM_CK12]["Group"] = ["Time Parameters", "NAM parameters"]
 NAM["Parameters"][exchange_parameters_NAM_CK12]["Key"] = ["Lagtime", "Lagtime"]
 NAM["Parameters"][exchange_parameters_NAM_CK12]["Unit"] = "[h]"
+NAM["Parameters"][exchange_parameters_NAM_CK12]["Convertion Factor"] = [1/3600.0, 1.0] # [sec] -> [h]
 NAM["Parameters"][exchange_parameters_NAM_CK12]["Range"] = (10.0, 50.0)
 NAM["Parameters"][exchange_parameters_NAM_CAREA] = {}
 NAM["Parameters"][exchange_parameters_NAM_CAREA]["Name"] = "C_area"
wolfhece/libs/WolfDll.dll CHANGED
Binary file
wolfhece/pydike.py CHANGED
@@ -63,7 +63,7 @@ class Dike(Triangulation,Zones):
         nb = int(self.trace.length3D/ds)
         nb2 = int(max(distup,distdown)/ds)
 
-        mytri = myzone.createmultibin(nb,nb2)
+        mytri = myzone.create_multibin(nb,nb2)
         self.tri = mytri.tri
         self.pts = mytri.pts
         self.nb_pts = mytri.nb_pts