wolfhece 2.2.37__py3-none-any.whl → 2.2.39__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. wolfhece/Coordinates_operations.py +5 -0
  2. wolfhece/GraphNotebook.py +72 -1
  3. wolfhece/GraphProfile.py +1 -1
  4. wolfhece/MulticriteriAnalysis.py +1579 -0
  5. wolfhece/PandasGrid.py +62 -1
  6. wolfhece/PyCrosssections.py +194 -43
  7. wolfhece/PyDraw.py +891 -73
  8. wolfhece/PyGui.py +913 -72
  9. wolfhece/PyGuiHydrology.py +528 -74
  10. wolfhece/PyPalette.py +26 -4
  11. wolfhece/PyParams.py +33 -0
  12. wolfhece/PyPictures.py +2 -2
  13. wolfhece/PyVertex.py +32 -0
  14. wolfhece/PyVertexvectors.py +147 -75
  15. wolfhece/PyWMS.py +52 -36
  16. wolfhece/acceptability/acceptability.py +15 -8
  17. wolfhece/acceptability/acceptability_gui.py +507 -360
  18. wolfhece/acceptability/func.py +80 -183
  19. wolfhece/apps/version.py +1 -1
  20. wolfhece/compare_series.py +480 -0
  21. wolfhece/drawing_obj.py +12 -1
  22. wolfhece/hydrology/Catchment.py +228 -162
  23. wolfhece/hydrology/Internal_variables.py +43 -2
  24. wolfhece/hydrology/Models_characteristics.py +69 -67
  25. wolfhece/hydrology/Optimisation.py +893 -182
  26. wolfhece/hydrology/PyWatershed.py +267 -165
  27. wolfhece/hydrology/SubBasin.py +185 -140
  28. wolfhece/hydrology/climate_data.py +334 -0
  29. wolfhece/hydrology/constant.py +11 -0
  30. wolfhece/hydrology/cst_exchanges.py +76 -1
  31. wolfhece/hydrology/forcedexchanges.py +413 -49
  32. wolfhece/hydrology/hyetograms.py +2095 -0
  33. wolfhece/hydrology/read.py +65 -5
  34. wolfhece/hydrometry/kiwis.py +42 -26
  35. wolfhece/hydrometry/kiwis_gui.py +7 -2
  36. wolfhece/insyde_be/INBE_func.py +746 -0
  37. wolfhece/insyde_be/INBE_gui.py +1776 -0
  38. wolfhece/insyde_be/__init__.py +3 -0
  39. wolfhece/interpolating_raster.py +366 -0
  40. wolfhece/irm_alaro.py +1457 -0
  41. wolfhece/irm_qdf.py +889 -57
  42. wolfhece/lifewatch.py +6 -3
  43. wolfhece/picc.py +124 -8
  44. wolfhece/pyLandUseFlanders.py +146 -0
  45. wolfhece/pydownloader.py +2 -1
  46. wolfhece/pywalous.py +225 -31
  47. wolfhece/toolshydrology_dll.py +149 -0
  48. wolfhece/wolf_array.py +63 -25
  49. {wolfhece-2.2.37.dist-info → wolfhece-2.2.39.dist-info}/METADATA +3 -1
  50. {wolfhece-2.2.37.dist-info → wolfhece-2.2.39.dist-info}/RECORD +53 -42
  51. {wolfhece-2.2.37.dist-info → wolfhece-2.2.39.dist-info}/WHEEL +0 -0
  52. {wolfhece-2.2.37.dist-info → wolfhece-2.2.39.dist-info}/entry_points.txt +0 -0
  53. {wolfhece-2.2.37.dist-info → wolfhece-2.2.39.dist-info}/top_level.txt +0 -0
wolfhece/hydrology/climate_data.py (new file)
@@ -0,0 +1,334 @@
+ import pandas as pd
+ import geopandas as gpd
+
+ import numpy as np
+ from osgeo import osr, gdal
+ from pyproj import Proj, Transformer
+ from pathlib import Path
+
+ import matplotlib.pyplot as plt
+ from scipy.spatial import KDTree
+ import logging
+ from tqdm import tqdm
+
+ logging.basicConfig(level=logging.INFO)
+
+ DATADIR = Path(r'P:\Donnees\Pluies\IRM\climateGrid') # path to the IRM climate data directory - to be changed if needed
+
+ def transform_latlon_to_lambert72_list(lat_list:list[float], lon_list:list[float]) -> list[tuple[float, float]]:
+     """
+     Transform lists of EPSG:4258 coordinates to Lambert 72 coordinates.
+
+     Coordinates from IRM are in EPSG:4258, and we want to convert them to Lambert 72 (EPSG:31370).
+     """
+     t = Transformer.from_crs('EPSG:4258', 'EPSG:31370', always_xy=True)
+     return [t.transform(lon, lat) for lat, lon in zip(lat_list, lon_list)]
+
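A minimal usage sketch for the transformer above (the coordinates are illustrative values near Liège, not taken from the package):

    # illustrative lat/lon pairs (EPSG:4258), roughly around Liège
    lats = [50.64, 50.47]
    lons = [5.57, 5.82]
    xy = transform_latlon_to_lambert72_list(lats, lons)
    # -> [(x0, y0), (x1, y1)] in Lambert 72 (EPSG:31370) metres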
+ def read_pixel_positions(data_dir:Path=DATADIR) -> tuple[list[int], list[tuple[float, float]]]:
+     """
+     Read pixel positions from the metadata file.
+     """
+
+     file = data_dir / 'climategrid_pixel_metadata.csv'
+
+     if not file.exists():
+         logging.error(f"Metadata file {file} does not exist.")
+         return None, None
+
+     df = pd.read_csv(file,
+                      sep=";",
+                      header=0,
+                      dtype={'PIXEL_ID': int,
+                             'PIXEL_LON_CENTER': float,
+                             'PIXEL_LAT_CENTER': float},
+                      index_col='PIXEL_ID')
+
+     return df.index, transform_latlon_to_lambert72_list(df['PIXEL_LAT_CENTER'].to_list(),
+                                                         df['PIXEL_LON_CENTER'].to_list())
+
+ def convert_pixels_to_squares(pixels:list[tuple[float, float]]) -> tuple[list[tuple[tuple[float, float], ...]], KDTree]:
+     """
+     From pixel coordinates, define squares around each pixel center.
+
+     Corners are defined as the average of the pixel center and its neighbors.
+     """
+
+     PIXEL_SIZE = 5000
+     NB = len(pixels)
+
+     pixels = np.array(pixels)
+
+     # create a KDTree for fast neighbor search
+     tree = KDTree(pixels)
+
+     # find the 4 nearest neighbors of each potential corner
+     corner1 = [(p[0] - PIXEL_SIZE / 2, p[1] - PIXEL_SIZE / 2) for p in pixels] # lower-left corner
+     corner2 = [(p[0] + PIXEL_SIZE / 2, p[1] - PIXEL_SIZE / 2) for p in pixels] # lower-right corner
+     corner3 = [(p[0] + PIXEL_SIZE / 2, p[1] + PIXEL_SIZE / 2) for p in pixels] # upper-right corner
+     corner4 = [(p[0] - PIXEL_SIZE / 2, p[1] + PIXEL_SIZE / 2) for p in pixels] # upper-left corner
+
+     d1, i1 = tree.query(corner1, k=4, distance_upper_bound=PIXEL_SIZE*1.1)
+     d2, i2 = tree.query(corner2, k=4, distance_upper_bound=PIXEL_SIZE*1.1)
+     d3, i3 = tree.query(corner3, k=4, distance_upper_bound=PIXEL_SIZE*1.1)
+     d4, i4 = tree.query(corner4, k=4, distance_upper_bound=PIXEL_SIZE*1.1)
+
+     squares = []
+     for i, pixel in enumerate(pixels):
+
+         used = i1[i][i1[i] != NB] # filter out the invalid indices
+         if len(used) in [1, 3]:
+             x1, y1 = pixel[0] - PIXEL_SIZE / 2, pixel[1] - PIXEL_SIZE / 2
+         elif len(used) == 2:
+             dx = (pixels[used[0], 0] - pixels[used[1], 0])
+             dy = (pixels[used[0], 1] - pixels[used[1], 1])
+             if abs(dx) < 100:
+                 x1, y1 = pixel[0] - PIXEL_SIZE / 2, np.asarray([pixels[used,1]]).mean()
+             else:
+                 x1, y1 = np.asarray([pixels[used,0]]).mean(), pixel[1] - PIXEL_SIZE / 2
+         else:
+             x1, y1 = np.asarray([pixels[used,0]]).mean(), np.asarray([pixels[used,1]]).mean()
+
+         used = i2[i][i2[i] != NB]
+         if len(used) in [1, 3]:
+             x2, y2 = pixel[0] + PIXEL_SIZE / 2, pixel[1] - PIXEL_SIZE / 2
+         elif len(used) == 2:
+             dx = (pixels[used[0], 0] - pixels[used[1], 0])
+             dy = (pixels[used[0], 1] - pixels[used[1], 1])
+             if abs(dx) < 100:
+                 x2, y2 = pixel[0] + PIXEL_SIZE / 2, np.asarray([pixels[used,1]]).mean()
+             else:
+                 x2, y2 = np.asarray([pixels[used,0]]).mean(), pixel[1] - PIXEL_SIZE / 2
+         else:
+             x2, y2 = np.asarray([pixels[used,0]]).mean(), np.asarray([pixels[used,1]]).mean()
+
+         used = i3[i][i3[i] != NB]
+         if len(used) in [1, 3]:
+             x3, y3 = pixel[0] + PIXEL_SIZE / 2, pixel[1] + PIXEL_SIZE / 2
+         elif len(used) == 2:
+             dx = (pixels[used[0], 0] - pixels[used[1], 0])
+             dy = (pixels[used[0], 1] - pixels[used[1], 1])
+             if abs(dx) < 100:
+                 x3, y3 = pixel[0] + PIXEL_SIZE / 2, np.asarray([pixels[used,1]]).mean()
+             else:
+                 x3, y3 = np.asarray([pixels[used,0]]).mean(), pixel[1] + PIXEL_SIZE / 2
+         else:
+             x3, y3 = np.asarray([pixels[used,0]]).mean(), np.asarray([pixels[used,1]]).mean()
+
+         used = i4[i][i4[i] != NB]
+         if len(used) in [1, 3]:
+             x4, y4 = pixel[0] - PIXEL_SIZE / 2, pixel[1] + PIXEL_SIZE / 2
+         elif len(used) == 2:
+             dx = (pixels[used[0], 0] - pixels[used[1], 0])
+             dy = (pixels[used[0], 1] - pixels[used[1], 1])
+             if abs(dx) < 100:
+                 x4, y4 = pixel[0] - PIXEL_SIZE / 2, np.asarray([pixels[used,1]]).mean()
+             else:
+                 x4, y4 = np.asarray([pixels[used,0]]).mean(), pixel[1] + PIXEL_SIZE / 2
+         else:
+             x4, y4 = np.asarray([pixels[used,0]]).mean(), np.asarray([pixels[used,1]]).mean()
+
+         if x1 == pixel[0]:
+             x1 = pixel[0] - PIXEL_SIZE / 2
+         if y1 == pixel[1]:
+             y1 = pixel[1] - PIXEL_SIZE / 2
+         if x2 == pixel[0]:
+             x2 = pixel[0] + PIXEL_SIZE / 2
+         if y2 == pixel[1]:
+             y2 = pixel[1] - PIXEL_SIZE / 2
+         if x3 == pixel[0]:
+             x3 = pixel[0] + PIXEL_SIZE / 2
+         if y3 == pixel[1]:
+             y3 = pixel[1] + PIXEL_SIZE / 2
+         if x4 == pixel[0]:
+             x4 = pixel[0] - PIXEL_SIZE / 2
+         if y4 == pixel[1]:
+             y4 = pixel[1] + PIXEL_SIZE / 2
+
+         squares.append(((x1, y1), (x2, y2), (x3, y3), (x4, y4)))
+
+     return squares, tree
+
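The `i1[i] != NB` filter above relies on a documented SciPy convention: when fewer than k neighbours fall within distance_upper_bound, KDTree.query pads the result with index == tree.n (here NB) and distance == inf. A self-contained sketch of that behaviour:

    import numpy as np
    from scipy.spatial import KDTree

    pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    tree = KDTree(pts)
    d, idx = tree.query([(0.1, 0.1)], k=4, distance_upper_bound=1.5)
    # idx -> [[0 1 2 3]]: the trailing 3 equals tree.n and marks a missing neighbour
    valid = idx[0][idx[0] != tree.n]   # same filtering as in convert_pixels_to_squares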
+ def read_historical_year_month(year:int, month:int,
+                                data_dir:Path=DATADIR) -> pd.DataFrame:
+     """
+     Read a specific year and month from the climate data.
+
+     Available variables are:
+       - day
+       - temp_max
+       - temp_min
+       - temp_avg
+       - precip_quantity
+       - humidity_relative
+       - pressure
+       - sun_duration
+       - short_wave_from_sky
+       - evapotrans_ref
+
+     From IRM's metadata description:
+       - TEMP_MAX [°C]: daily maximum temperature from 08:00LT on DATE_BEGIN to 08:00LT on DATE_END+1
+       - TEMP_MIN [°C]: daily minimum temperature from 08:00LT on DATE_BEGIN-1 to 08:00LT on DATE_END
+       - TEMP_AVG [°C]: average temperature (average of TEMP_MAX and TEMP_MIN)
+       - PRECIP_QUANTITY [mm]: precipitation quantity from 08:00LT on DATE_BEGIN to 08:00LT on DATE_END+1
+       - HUMIDITY_RELATIVE [%]: average relative humidity
+       - PRESSURE [hPa]: sea level pressure
+       - SUN_DURATION [hours/day]: average daily sunshine duration
+       - SHORT_WAVE_FROM_SKY [kWh/m2/day]: average daily global solar radiation
+       - EVAPOTRANS_REF [mm]: reference evapotranspiration ET0
+
+     :param year: Year to read
+     :type year: int
+     :param month: Month to read
+     :type month: int
+     :param data_dir: Directory where the data is stored
+     :type data_dir: Path
+     :return: DataFrame containing the data for the specified year and month
+     """
+
+     # force month to be two digits
+     month = f"{month:02d}"
+     file_path = data_dir / f"climategrid_{year}{month}.csv"
+
+     if file_path.exists():
+         logging.info(f"Reading data from {file_path}")
+         df = pd.read_csv(file_path, header=0, sep=';', index_col='pixel_id')
+
+         # convert 'day' to datetime UTC
+         df['day'] = pd.to_datetime(df['day'], format='%Y/%m/%d', utc=True)
+         return df
+     else:
+         logging.warning(f"File {file_path} does not exist.")
+         return pd.DataFrame()
+
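A usage sketch for the reader above, assuming the monthly IRM CSV files are present under DATADIR (the pixel id is hypothetical):

    df = read_historical_year_month(2021, 7)
    if not df.empty:
        # rows are indexed by pixel_id; 'day' and the variables are columns
        one_pixel = df.loc[100, ['day', 'precip_quantity']]   # pixel id 100 is illustrative
        print(one_pixel.head())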
+ def scan_climate_files(data_dir:Path=DATADIR) -> list[Path]:
+     """
+     Scan the directory for climate data files.
+
+     :param data_dir: Directory where the data is stored
+     :type data_dir: Path
+     :return: List of paths to climate data files
+     """
+     all = list(data_dir.glob('climategrid_*.csv'))
+     # all.pop(all.index('climategrid_parameters_description.txt'))
+     f = [file.stem for file in all]
+     all.pop(f.index('climategrid_pixel_metadata'))
+     return all
+
+ def find_first_available_year_month(data_dir:Path=DATADIR) -> tuple[int, int]:
+     """
+     Find the first available year and month in the climate data files.
+
+     :param data_dir: Directory where the data is stored
+     :type data_dir: Path
+     :return: First available (year, month) as integers
+     """
+     files = scan_climate_files(data_dir)
+     years = [int(file.stem.split('_')[1][:4]) for file in files]
+     minyear = min(years) if years else None
+     if minyear is not None:
+         logging.info(f"First available year: {minyear}")
+         # find the first month of the first year
+         first_month = min([int(file.stem.split('_')[1][4:6]) for file in files if file.stem.startswith(f'climategrid_{minyear}')])
+         logging.info(f"First available month: {first_month}")
+         return minyear, first_month
+     else:
+         logging.warning("No climate data files found.")
+         return None, None
+
+ def find_last_available_year_month(data_dir:Path=DATADIR) -> tuple[int, int]:
+     """
+     Find the last available year and month in the climate data files.
+
+     :param data_dir: Directory where the data is stored
+     :type data_dir: Path
+     :return: Last available (year, month) as integers
+     """
+     files = scan_climate_files(data_dir)
+     years = [int(file.stem.split('_')[1][:4]) for file in files]
+     maxyear = max(years) if years else None
+     if maxyear is not None:
+         logging.info(f"Last available year: {maxyear}")
+         # find the last month of the last year
+         last_month = max([int(file.stem.split('_')[1][4:6]) for file in files if file.stem.startswith(f'climategrid_{maxyear}')])
+         logging.info(f"Last available month: {last_month}")
+         return maxyear, last_month
+     else:
+         logging.warning("No climate data files found.")
+         return None, None
+
+ def read_between(data_dir:Path=DATADIR, start_year:int = 1961, start_month:int = 1, end_year:int = 2025, end_month:int = 6) -> pd.DataFrame:
+     """
+     Read climate data files between two dates into a single DataFrame.
+
+     :param data_dir: Directory where the data is stored
+     :type data_dir: Path
+     :param start_year: First year to read
+     :param start_month: First month to read
+     :param end_year: Last year to read
+     :param end_month: Last month to read
+     :return: DataFrame containing all climate data in the requested range
+     """
+
+     _start_year, _start_month = find_first_available_year_month(data_dir)
+     _end_year, _end_month = find_last_available_year_month(data_dir)
+
+     if start_year < _start_year or (start_year == _start_year and start_month < _start_month):
+         logging.warning(f"Start date {start_year}-{start_month} is before the first available data {_start_year}-{_start_month}. Using {_start_year}-{_start_month} instead.")
+         start_year, start_month = _start_year, _start_month
+
+     if end_year > _end_year or (end_year == _end_year and end_month > _end_month):
+         logging.warning(f"End date {end_year}-{end_month} is after the last available data {_end_year}-{_end_month}. Using {_end_year}-{_end_month} instead.")
+         end_year, end_month = _end_year, _end_month
+
+     logging.info(f"Reading data from {start_year}-{start_month} to {end_year}-{end_month}")
+
+     mapped = []
+     for year in range(start_year, end_year+1):
+         for month in range(1, 13):
+             if year == start_year and month < start_month:
+                 continue
+             if year == end_year and month > end_month:
+                 continue
+             mapped.append((year, month))
+
+     df_list = list(map(lambda ym: read_historical_year_month(ym[0], ym[1], data_dir), mapped))
+
+     return pd.concat(df_list, axis=0)
+
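A sketch of the intended call pattern (assumes the archive is on disk; out-of-range bounds are clamped with the warnings above):

    print(find_first_available_year_month())   # e.g. (1961, 1)
    print(find_last_available_year_month())    # e.g. (2025, 6)

    df = read_between(start_year=2010, start_month=10, end_year=2011, end_month=9)
    # monthly precipitation totals, all pixels pooled
    monthly = df.groupby([df['day'].dt.year, df['day'].dt.month])['precip_quantity'].sum()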
+ def read_all_data(data_dir:Path=DATADIR) -> pd.DataFrame:
+     """
+     Read all climate data files into a single DataFrame.
+
+     :param data_dir: Directory where the data is stored
+     :type data_dir: Path
+     :return: DataFrame containing all climate data
+     """
+
+     return read_between(data_dir, 0, 0, 2100, 12)
+
+ if __name__ == "__main__":
+
+     print(find_first_available_year_month())
+     print(find_last_available_year_month())
+
+     data = read_all_data()
+     print(data.head())
+
+     pixel_ids, xy = read_pixel_positions()
+     print(f"Pixel IDs: {pixel_ids}")
+     print(f"Pixel XY: {xy}")
+
+     # convert_pixels_to_squares returns (squares, KDTree)
+     squares, tree = convert_pixels_to_squares(xy)
+
+     xy = np.array(xy)
+
+     fig, ax = plt.subplots(figsize=(6, 6))
+     ax.scatter(xy[:, 0], xy[:, 1], s=1)
+     ax.set_title("Pixel Positions in Lambert 72")
+     ax.set_xlabel("X (Lambert 72)")
+     ax.set_ylabel("Y (Lambert 72)")
+
+     # plot squares
+     for square in squares:
+         (x1, y1), (x2, y2), (x3, y3), (x4, y4) = square
+         ax.plot([x1, x2, x3, x4, x1], [y1, y2, y3, y4, y1], color='red')
+     ax.set_aspect('equal', adjustable='box')
+     plt.show()
wolfhece/hydrology/constant.py
@@ -22,6 +22,7 @@ tom_2layers_UH = 8
  tom_HBV = 9
  tom_SAC_SMA = 10
  tom_NAM = 11
+ tom_SAC_SMA_LROF = 12
  compare_opti = -1


@@ -36,6 +37,16 @@ tom_netRain_storage = 1
  tom_transf_no = 0 # no transfer model -> use the estimated times
  tom_transf_cst = 1 # transfer model with a constant time

+ # Type of source/input data
+ source_none = -1 # source data not present or not available
+ source_custom = 0 # source data in a custom format (a mix of the data below)
+ source_netcdf = 1 # source data in NetCDF format
+ source_IRM = 2 # source data based on the IRM gridded files (daily time step, available for the Ourthe)
+ source_municipality_unit_hyeto = 3 # IRM QDF data per municipality
+ source_point_measurements = 4 # SPW rain-gauge data
+ source_Copernicus = 5 # rainfall or temperature data from the Copernicus project, in NetCDF
+ source_dist = 6 # gridded rainfall, temperature or evaporation data (a polygon + a time series for each)
+

  ## dictionary of the default indices for each landuse
  DEFAULT_LANDUSE = {}
wolfhece/hydrology/cst_exchanges.py
@@ -10,8 +10,9 @@ copying or distribution of this file, via any medium, is strictly prohibited.

  from . import constant as cst
  from enum import Enum
- # Constants representing the exchanges - Fortran
+ # Constants representing the exchanges - Fortran (cf. the Fortran file cst_exchange.f90)

+ # Types of exchanges between blocks/models
  exchange_parameters_VHM_Umax = 20 # VHM model parameter
  exchange_parameters_VHM_Uevap = 21 # VHM model parameter
  exchange_parameters_VHM_au1 = 22 # VHM model parameter
@@ -59,6 +60,8 @@ exchange_parameters_SAC_riva = 77 #Paramètre modèle SAC-SMA (SACRAM
  exchange_parameters_SAC_adimp = 78 # SAC-SMA (SACRAMENTO) model parameter
  exchange_parameters_SAC_impv = 79 # SAC-SMA (SACRAMENTO) model parameter

+ exchange_parameters_SAC_kof = 120 # SAC-SMA (SACRAMENTO) model parameter, with LR OF
+
  exchange_parameters_NAM_UMAX = 82 # NAM model parameter
  exchange_parameters_NAM_TOF = 83 # NAM model parameter
  exchange_parameters_NAM_TIF = 84 # NAM model parameter
@@ -94,6 +97,65 @@ exchange_parameters_Dist_Horton_K = 116 #Paramètre modèle distribué Hor
  exchange_parameters_Dist_kif = 117 # distributed model parameter for the linear reservoir of the top soil layer (if)
  exchange_parameters_Dist_qlif = 118 # distributed model parameter for the linear reservoir of the top soil layer (if)

+ # Internal variable ids
+ iv_VHM_qof = 201
+ iv_VHM_qif = 202
+ iv_VHM_qbf = 203
+ iv_VHM_U = 204
+ iv_VHM_xu = 205
+ iv_VHM_xof = 206
+ iv_VHM_xif = 207
+ iv_VHM_xbf = 208
+
+ iv_2layers_linBF_qof = 701
+ iv_2layers_linBF_qif = 702
+ iv_2layers_linBF_U = 703
+ iv_2layers_linBF_S = 704
+ iv_2layers_linBF_xif = 705
+ iv_2layers_linBF_xp = 706
+
+ iv_HBV_qr = 901
+ iv_HBV_qif = 902
+ iv_HBV_qbf = 903
+ iv_HBV_qrech = 904
+ iv_HBV_soil_qcap = 905
+ iv_HBV_qperc = 906
+ iv_HBV_UZ_qcap = 907
+ iv_HBV_U = 908
+ iv_HBV_Su = 909
+ iv_HBV_etr = 910
+
+ iv_SACSMA_qof = 1001
+ iv_SACSMA_qif = 1002
+ iv_SACSMA_qbf = 1003
+ iv_SACSMA_qsubbf = 1004
+ iv_SACSMA_qsurf = 1005
+ iv_SACSMA_qbase = 1006
+ iv_SACSMA_etot = 1007
+ iv_SACSMA_e1 = 1008
+ iv_SACSMA_e2 = 1009
+ iv_SACSMA_e5 = 1010
+ iv_SACSMA_qqif = 1011
+ iv_SACSMA_qqsr = 1012
+ iv_SACSMA_qqdr = 1013
+ iv_SACSMA_CUZTW = 1014
+ iv_SACSMA_CUZFW = 1015
+ iv_SACSMA_CADIMP = 1016
+ iv_SACSMA_CLZTW = 1017
+ iv_SACSMA_CLZFP = 1018
+ iv_SACSMA_CLZFS = 1019
+ iv_SACSMA_e3 = 1020
+ iv_SACSMA_qoutLR = 1021
+
+ iv_NAM_qof = 1101
+ iv_NAM_qif = 1102
+ iv_NAM_qbf = 1103
+ iv_NAM_ea = 1104
+ iv_NAM_erz = 1105
+ iv_NAM_qg = 1106
+ iv_NAM_U = 1107
+ iv_NAM_L = 1108
+

  # Constants representing the exchanges - Python
  exchange_parameters_py_timeDelay = -11
@@ -497,6 +559,17 @@ SAC_SMA["Parameters"][exchange_parameters_SAC_impv]["Unit"] = "[-]"
  SAC_SMA["Parameters"][exchange_parameters_SAC_impv]["Range"] = (0.0, 0.05)


+ SAC_SMA_LROF = SAC_SMA.copy()
+ SAC_SMA_LROF["Nb"] = 17
+ SAC_SMA_LROF["Parameters"][exchange_parameters_SAC_kof] = {}
+ SAC_SMA_LROF["Parameters"][exchange_parameters_SAC_kof]["Name"] = "Kof"
+ SAC_SMA_LROF["Parameters"][exchange_parameters_SAC_kof]["File"] = "simul_of.param"
+ SAC_SMA_LROF["Parameters"][exchange_parameters_SAC_kof]["Group"] = "Time Parameters"
+ SAC_SMA_LROF["Parameters"][exchange_parameters_SAC_kof]["Key"] = "Lagtime"
+ SAC_SMA_LROF["Parameters"][exchange_parameters_SAC_kof]["Unit"] = "[sec]"
+ SAC_SMA_LROF["Parameters"][exchange_parameters_SAC_kof]["Convertion Factor"] = 1/3600.0 # [sec] -> [h]
+
+
  NAM = {}
  NAM["Nb"] = 10
  NAM["Parameters"] = {}
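One caveat worth noting on the hunk above: dict.copy() is shallow, so SAC_SMA_LROF shares its nested "Parameters" dict with SAC_SMA, and the new exchange_parameters_SAC_kof entry is therefore also reachable through SAC_SMA["Parameters"] (only "Nb" keeps the two apart). Whether this matters depends on how SAC_SMA is consumed downstream. A minimal illustration of the Python semantics involved:

    base = {"Nb": 16, "Parameters": {}}
    variant = base.copy()                     # shallow copy: "Parameters" is shared
    variant["Nb"] = 17                        # rebinding a key leaves base untouched
    variant["Parameters"][120] = {"Name": "Kof"}
    assert base["Parameters"][120]["Name"] == "Kof"   # mutation is visible through base
    # copy.deepcopy(base) would give an independent "Parameters" dict if needed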
@@ -583,6 +656,7 @@ modelParamsDict[cst.tom_2layers_linIF]= UHDIST_LINBF
  modelParamsDict[cst.tom_HBV]= HBV
  modelParamsDict[cst.tom_SAC_SMA]= SAC_SMA
  modelParamsDict[cst.tom_NAM]= NAM
+ modelParamsDict[cst.tom_SAC_SMA_LROF]= SAC_SMA_LROF

  # %% Python-Fortran exchange constants

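For orientation, a hypothetical lookup against the mapping above (module paths inferred from the file list at the top of this diff):

    from wolfhece.hydrology import constant as cst
    from wolfhece.hydrology.cst_exchanges import modelParamsDict

    params = modelParamsDict[cst.tom_SAC_SMA_LROF]   # the SAC_SMA_LROF dict
    print(params["Nb"])                              # 17
    kof = params["Parameters"][120]                  # exchange_parameters_SAC_kof
    print(kof["Key"], kof["Unit"])                   # Lagtime [sec]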
@@ -590,6 +664,7 @@ ptr_params = 1
  ptr_opti_factors = 2
  ptr_q_all = 3
  ptr_time_delays = 4
+ ptr_iv_saved = 5

  fptr_update = 1
  fptr_get_cvg = 2