wolfhece 2.2.37__py3-none-any.whl → 2.2.38__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wolfhece/PyVertex.py +7 -0
- wolfhece/PyVertexvectors.py +53 -47
- wolfhece/apps/version.py +1 -1
- wolfhece/hydrology/climate_data.py +334 -0
- wolfhece/hydrology/constant.py +11 -0
- wolfhece/hydrology/hyetograms.py +2095 -0
- wolfhece/hydrometry/kiwis.py +28 -19
- wolfhece/hydrometry/kiwis_gui.py +7 -2
- {wolfhece-2.2.37.dist-info → wolfhece-2.2.38.dist-info}/METADATA +1 -1
- {wolfhece-2.2.37.dist-info → wolfhece-2.2.38.dist-info}/RECORD +13 -11
- {wolfhece-2.2.37.dist-info → wolfhece-2.2.38.dist-info}/WHEEL +0 -0
- {wolfhece-2.2.37.dist-info → wolfhece-2.2.38.dist-info}/entry_points.txt +0 -0
- {wolfhece-2.2.37.dist-info → wolfhece-2.2.38.dist-info}/top_level.txt +0 -0
wolfhece/PyVertex.py
CHANGED
@@ -1061,6 +1061,13 @@ class cloud_vertices(Element_To_Draw):
 
         if self.myprop.legendvisible:
 
+            dx = xmax - xmin
+            dy = ymax - ymin
+
+            if dx > 50_000. or dy > 50_000.:
+                logging.warning(_('Too large bounds for legend plot -- skipping'))
+                return
+
             mapviewer = self.get_mapviewer()
 
             which_legend = self.myprop.legendtext
wolfhece/PyVertexvectors.py
CHANGED
@@ -1336,6 +1336,8 @@ class vector:
         self._lengthparts2D=None
         self._lengthparts3D=None
 
+        self._simplified_geometry = False
+
         if type(lines)==list:
             if len(lines)>0:
                 self.myname=lines[0]
@@ -2373,30 +2375,32 @@ class vector:
                return tri.get_triangles_as_listwolfvertices()
 
            else:
+               if self._simplified_geometry:
+                   if self.myprop.closed and (self.myvertices[0].x != self.myvertices[-1].x or self.myvertices[0].y != self.myvertices[-1].y):
+                       return [self.myvertices + [self.myvertices[0]]]
+                   else:
+                       return [self.myvertices]
+               else:
+                   xx, yy = self.polygon.exterior.xy
 
+                   # Shift the coordinates to avoid triangulation errors
+                   tr_x = np.array(xx).min()
+                   tr_y = np.array(yy).min()
 
+                   xx = np.array(xx)-tr_x
+                   yy = np.array(yy)-tr_y
 
+                   geom = {'vertices' : [[x,y] for x,y in zip(xx[:-1],yy[:-1])], 'segments' : [[i,i+1] for i in range(len(xx)-2)]+[[len(xx)-2,0]]}
 
+                   try:
+                       delaunay = triangle.triangulate(geom, 'p')
+                       tri = []
+                       for curtri in delaunay['triangles']:
+                           # shift the coordinates back to the real world
+                           tri.append([wolfvertex(delaunay['vertices'][curtri[i]][0] + tr_x, delaunay['vertices'][curtri[i]][1] + tr_y) for i in range(3)])
+                       return tri
+                   except:
+                       pass
 
        else:
            if self.has_interior:
@@ -3521,6 +3525,12 @@ class vector:
         self.myprop.legendy = centroid.y
         self.myprop.legendtext = text if text else self.myname
 
+    def set_legend_visible(self, visible:bool=True):
+        """
+        Set the visibility of the legend.
+        """
+        self.myprop.legendvisible = visible
+
     def set_legend_position_to_centroid(self):
         """
         Position the legend at the centroid of the vector
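`set_legend_visible` is a thin setter over `myprop.legendvisible`. A hedged usage sketch (assuming a `vector` can be constructed empty, which the `lines` handling above suggests):

    from wolfhece.PyVertexvectors import vector

    vec = vector()                 # assumption: default constructor works standalone
    vec.set_legend_visible(False)  # hide this vector's legend
    vec.set_legend_visible()       # 'visible' defaults to True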
@@ -4133,8 +4143,8 @@ class zone:
 
     def check_if_interior_exists(self):
         """ Check if the zone has at least one vector with interior points """
-        for curvec in self.myvectors:
-            curvec.check_if_interior_exists()
+
+        list(map(lambda curvec: curvec.check_if_interior_exists(), self.myvectors))
 
     def add_values(self, key:str, values:np.ndarray):
         """ add values to the zone """
@@ -4143,8 +4153,7 @@ class zone:
             logging.warning(_('Number of vectors and values do not match'))
             return
 
-        for curvec, curval in zip(self.myvectors, values):
-            curvec.add_value(key, curval)
+        list(map(lambda cur: cur[0].add_value(key, cur[1]), zip(self.myvectors, values)))
 
     def get_values(self, key:str) -> np.ndarray:
         """ get values from the zone """
@@ -4154,25 +4163,21 @@ class zone:
     def set_colors_from_value(self, key:str, cmap:wolfpalette | Colormap | cm.ScalarMappable, vmin:float= 0., vmax:float= 1.):
         """ Set the colors for the zone """
 
-        for curvec in self.myvectors:
-            curvec.set_color_from_value(key, cmap, vmin, vmax)
+        list(map(lambda curvec: curvec.set_color_from_value(key, cmap, vmin, vmax), self.myvectors))
 
     def set_alpha(self, alpha:int):
         """ Set the alpha for the zone """
 
-        for curvec in self.myvectors:
-            curvec.set_alpha(alpha)
+        list(map(lambda curvec: curvec.set_alpha(alpha), self.myvectors))
 
     def set_filled(self, filled:bool):
         """ Set the filled for the zone """
 
-        for curvec in self.myvectors:
-            curvec.set_filled(filled)
+        list(map(lambda curvec: curvec.set_filled(filled), self.myvectors))
 
     def check_if_open(self):
         """ Check if the vectors in the zone are open """
-        for curvect in self.myvectors:
-            curvect.check_if_open()
+        list(map(lambda curvect: curvect.check_if_open(), self.myvectors))
 
     def buffer(self, distance:float, resolution:int=16, inplace:bool = False) -> 'zone':
         """ Create a new zone with a buffer around each vector """
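Throughout `zone`, per-vector for-loops are replaced by `list(map(...))`; behaviour is unchanged, since building the list forces every call and the return values were never used. The pattern in isolation:

    class Item:
        def __init__(self):
            self.alpha = None
        def set_alpha(self, alpha: int):
            self.alpha = alpha

    items = [Item() for _ in range(3)]

    # for-loop form (the old code)
    for it in items:
        it.set_alpha(128)

    # map form (the new code): materialising the list forces the calls to run
    list(map(lambda it: it.set_alpha(128), items))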
@@ -4197,24 +4202,21 @@ class zone:
         Set the legend text for the zone
         """
 
-        for curvect in self.myvectors:
-            curvect.set_legend_text(text)
+        list(map(lambda curvect: curvect.set_legend_text(text), self.myvectors))
 
     def set_legend_text_from_values(self, key:str):
         """
         Set the legend text for the zone from a value
         """
 
-        for curvect in self.myvectors:
-            curvect.set_legend_text_from_value(key)
+        list(map(lambda curvect: curvect.set_legend_text_from_value(key), self.myvectors))
+
 
     def set_legend_position(self, x, y):
         """
         Set the legend position for the zone
         """
-        for curvect in self.myvectors:
-            curvect.set_legend_position(x, y)
+        list(map(lambda curvect: curvect.set_legend_position(x, y), self.myvectors))
 
     @property
     def area(self):
@@ -4230,15 +4232,14 @@ class zone:
         """
         Set the cache for the zone and all its vectors
         """
-        for curvect in self.myvectors:
-            curvect.set_cache()
+
+        list(map(lambda curvect: curvect.set_cache(), self.myvectors))
 
     def clear_cache(self):
         """
         Clear the cache for the zone and all its vectors
         """
-        for curvect in self.myvectors:
-            curvect.clear_cache()
+        list(map(lambda curvect: curvect.clear_cache(), self.myvectors))
 
         self._move_start = None
         self._move_step = None
@@ -4598,8 +4599,9 @@ class zone:
         else:
             fig = ax.figure
 
-        for curvect in self.myvectors:
-            curvect.plot_matplotlib(ax)
+        # for curvect in self.myvectors:
+        #     curvect.plot_matplotlib(ax)
+        list(map(lambda curvect: curvect.plot_matplotlib(ax), self.myvectors))
 
         return fig, ax
@@ -6290,9 +6292,13 @@ class zone:
         """
         Set the legend to the centroid of the vectors
         """
+        list(map(lambda curvec: curvec.set_legend_to_centroid(), self.myvectors))
 
-        for curvec in self.myvectors:
-            curvec.set_legend_to_centroid()
+    def set_legend_visible(self, visible:bool=True):
+        """
+        Set the visibility of the legend for all vectors in the zone
+        """
+        list(map(lambda curvec: curvec.set_legend_visible(visible), self.myvectors))
 
 class Zones(wx.Frame, Element_To_Draw):
     """
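With both levels in place, toggling legends cascades from the zone down to each vector's properties in one call. A hedged usage sketch, assuming an empty `zone` can be constructed and stores its vectors in `myvectors` as the diff suggests:

    from wolfhece.PyVertexvectors import vector, zone

    z = zone()                          # assumption: default constructor works standalone
    z.myvectors = [vector(), vector()]  # assumption: vectors attach via 'myvectors'
    z.set_legend_visible(False)         # fans out to every vector
    print(all(not v.myprop.legendvisible for v in z.myvectors))  # True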
wolfhece/hydrology/climate_data.py
ADDED
@@ -0,0 +1,334 @@
+import pandas as pd
+import geopandas as gpd
+
+import numpy as np
+from osgeo import osr, gdal
+from pyproj import Proj, Transformer
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+from scipy.spatial import KDTree
+import logging
+from tqdm import tqdm
+
+logging.basicConfig(level=logging.INFO)
+
+DATADIR = Path(r'P:\Donnees\Pluies\IRM\climateGrid') # Path to the IRM climate data directory - To change if needed
+
+def transform_latlon_to_lambert72_list(lat_list:list[float], lon_list:list[float]) -> list[tuple[float, float]]:
+    """
+    Transform lists of EPSG:4258 coordinates to Lambert 72 coordinates.
+
+    Coordinates from IRM are in EPSG:4258, and we want to convert them to Lambert 72 (EPSG:31370).
+    """
+    t = Transformer.from_crs('EPSG:4258', 'EPSG:31370', always_xy=True)
+    return [t.transform(lon, lat) for lat, lon in zip(lat_list, lon_list)]
+
+def read_pixel_positions(data_dir:Path=DATADIR) -> tuple[list[int], list[tuple[float, float]]]:
+    """
+    Read pixel positions from the metadata file.
+    """
+
+    file = data_dir / 'climategrid_pixel_metadata.csv'
+
+    if not file.exists():
+        logging.error(f"Metadata file {file} does not exist.")
+        return None, None
+
+    df = pd.read_csv(file,
+                     sep=";",
+                     header=0,
+                     dtype={'PIXEL_ID': int,
+                            'PIXEL_LON_CENTER': float,
+                            'PIXEL_LAT_CENTER': float},
+                     index_col='PIXEL_ID')
+
+    return df.index, transform_latlon_to_lambert72_list(df['PIXEL_LAT_CENTER'].to_list(),
+                                                        df['PIXEL_LON_CENTER'].to_list())
+
+def convert_pixels_to_squares(pixels:list[tuple[float, float]]) -> tuple[list[tuple[tuple[float, float], ...]], KDTree]:
+    """
+    From pixels coordinates, define squares around each pixel center.
+
+    Corners are defined as the average of the pixel center and its neighbors.
+    """
+
+    PIXEL_SIZE = 5000
+    NB = len(pixels)
+
+    pixels = np.array(pixels)
+
+    # create a KDTree for fast neighbor search
+    tree = KDTree(pixels)
+
+    # find the 4 nearest neighbors for each potential corner
+    corner1 = [(p[0] - PIXEL_SIZE / 2, p[1] - PIXEL_SIZE / 2) for p in pixels] # lower-left corner
+    corner2 = [(p[0] + PIXEL_SIZE / 2, p[1] - PIXEL_SIZE / 2) for p in pixels] # lower-right corner
+    corner3 = [(p[0] + PIXEL_SIZE / 2, p[1] + PIXEL_SIZE / 2) for p in pixels] # upper-right corner
+    corner4 = [(p[0] - PIXEL_SIZE / 2, p[1] + PIXEL_SIZE / 2) for p in pixels] # upper-left corner
+
+    d1, i1 = tree.query(corner1, k=4, distance_upper_bound=PIXEL_SIZE*1.1)
+    d2, i2 = tree.query(corner2, k=4, distance_upper_bound=PIXEL_SIZE*1.1)
+    d3, i3 = tree.query(corner3, k=4, distance_upper_bound=PIXEL_SIZE*1.1)
+    d4, i4 = tree.query(corner4, k=4, distance_upper_bound=PIXEL_SIZE*1.1)
+
+    squares = []
+    for i, pixel in enumerate(pixels):
+
+        used = i1[i][i1[i] != NB] # filter out the invalid indices
+        if len(used) in [1, 3]:
+            x1, y1 = pixel[0] - PIXEL_SIZE / 2, pixel[1] - PIXEL_SIZE / 2
+        elif len(used) == 2:
+            dx = (pixels[used[0], 0] - pixels[used[1], 0])
+            dy = (pixels[used[0], 1] - pixels[used[1], 1])
+            if abs(dx) < 100:
+                x1, y1 = pixel[0] - PIXEL_SIZE / 2, np.asarray([pixels[used,1]]).mean()
+            else:
+                x1, y1 = np.asarray([pixels[used,0]]).mean(), pixel[1] - PIXEL_SIZE / 2
+        else:
+            x1, y1 = np.asarray([pixels[used,0]]).mean(), np.asarray([pixels[used,1]]).mean()
+
+        used = i2[i][i2[i] != NB]
+        if len(used) in [1, 3]:
+            x2, y2 = pixel[0] + PIXEL_SIZE / 2, pixel[1] - PIXEL_SIZE / 2
+        elif len(used) == 2:
+            dx = (pixels[used[0], 0] - pixels[used[1], 0])
+            dy = (pixels[used[0], 1] - pixels[used[1], 1])
+            if abs(dx) < 100:
+                x2, y2 = pixel[0] + PIXEL_SIZE / 2, np.asarray([pixels[used,1]]).mean()
+            else:
+                x2, y2 = np.asarray([pixels[used,0]]).mean(), pixel[1] - PIXEL_SIZE / 2
+        else:
+            x2, y2 = np.asarray([pixels[used,0]]).mean(), np.asarray([pixels[used,1]]).mean()
+
+        used = i3[i][i3[i] != NB]
+        if len(used) in [1, 3]:
+            x3, y3 = pixel[0] + PIXEL_SIZE / 2, pixel[1] + PIXEL_SIZE / 2
+        elif len(used) == 2:
+            dx = (pixels[used[0], 0] - pixels[used[1], 0])
+            dy = (pixels[used[0], 1] - pixels[used[1], 1])
+            if abs(dx) < 100:
+                x3, y3 = pixel[0] + PIXEL_SIZE / 2, np.asarray([pixels[used,1]]).mean()
+            else:
+                x3, y3 = np.asarray([pixels[used,0]]).mean(), pixel[1] + PIXEL_SIZE / 2
+        else:
+            x3, y3 = np.asarray([pixels[used,0]]).mean(), np.asarray([pixels[used,1]]).mean()
+
+        used = i4[i][i4[i] != NB]
+        if len(used) in [1, 3]:
+            x4, y4 = pixel[0] - PIXEL_SIZE / 2, pixel[1] + PIXEL_SIZE / 2
+        elif len(used) == 2:
+            dx = (pixels[used[0], 0] - pixels[used[1], 0])
+            dy = (pixels[used[0], 1] - pixels[used[1], 1])
+            if abs(dx) < 100:
+                x4, y4 = pixel[0] - PIXEL_SIZE / 2, np.asarray([pixels[used,1]]).mean()
+            else:
+                x4, y4 = np.asarray([pixels[used,0]]).mean(), pixel[1] + PIXEL_SIZE / 2
+        else:
+            x4, y4 = np.asarray([pixels[used,0]]).mean(), np.asarray([pixels[used,1]]).mean()
+
+        if x1 == pixel[0]:
+            x1 = pixel[0] - PIXEL_SIZE / 2
+        if y1 == pixel[1]:
+            y1 = pixel[1] - PIXEL_SIZE / 2
+        if x2 == pixel[0]:
+            x2 = pixel[0] + PIXEL_SIZE / 2
+        if y2 == pixel[1]:
+            y2 = pixel[1] - PIXEL_SIZE / 2
+        if x3 == pixel[0]:
+            x3 = pixel[0] + PIXEL_SIZE / 2
+        if y3 == pixel[1]:
+            y3 = pixel[1] + PIXEL_SIZE / 2
+        if x4 == pixel[0]:
+            x4 = pixel[0] - PIXEL_SIZE / 2
+        if y4 == pixel[1]:
+            y4 = pixel[1] + PIXEL_SIZE / 2
+
+        squares.append(((x1, y1), (x2, y2), (x3, y3), (x4, y4)))
+
+    return squares, tree
+
+def read_historical_year_month(year:int, month:int,
+                               data_dir:Path=DATADIR) -> pd.DataFrame:
+    """
+    Read a specific year and month from the climate data.
+
+    Available variables are :
+        - day
+        - temp_max
+        - temp_min
+        - temp_avg
+        - precip_quantity
+        - humidity_relative
+        - pressure
+        - sun_duration
+        - short_wave_from_sky
+        - evapotrans_ref
+
+    From IRM's Metadata description:
+        - TEMP_MAX °C daily maximum temperature from 08:00LT on DATE_BEGIN to 08:00LT on DATE_END+1
+        - TEMP_MIN °C daily minimum temperature from 08:00LT on DATE_BEGIN-1 to 08:00LT on DATE_END
+        - TEMP_AVG °C average temperature (average of TEMP_MAX and TEMP_MIN)
+        - PRECIP_QUANTITY mm precipitation quantity from 08:00LT on DATE_BEGIN to 08:00LT on DATE_END+1
+        - HUMIDITY_RELATIVE percentage average relative humidity
+        - PRESSURE hPa sea level pressure
+        - SUN_DURATION average daily sunshine duration (hours/day)
+        - SHORT_WAVE_FROM_SKY average daily global solar radiation (kWh/m2/day)
+        - EVAPOTRANS_REF mm reference evapotranspiration ET0
+
+    :param year: Year to read
+    :type year: int
+    :param month: Month to read
+    :type month: int
+    :param variable: Variable to read (e.g., 'temperature', 'precipitation')
+    :type variable: str
+    :param data_dir: Directory where the data is stored
+    :type data_dir: Path
+    :return: DataFrame containing the data for the specified year and month
+    """
+
+    # force month to be two digits
+    month = f"{month:02d}"
+    file_path = data_dir / f"climategrid_{year}{month:}.csv"
+
+    if file_path.exists():
+        logging.info(f"Reading data from {file_path}")
+        df = pd.read_csv(file_path, header=0, sep=';', index_col='pixel_id')
+
+        # convert 'day' to datetime UTC
+        df['day'] = pd.to_datetime(df['day'], format='%Y/%m/%d', utc=True)
+        return df
+    else:
+        logging.warning(f"File {file_path} does not exist.")
+        return pd.DataFrame()
+
+def scan_climate_files(data_dir:Path=DATADIR) -> list[Path]:
+    """
+    Scan the directory for climate data files.
+
+    :param data_dir: Directory where the data is stored
+    :type data_dir: Path
+    :return: List of paths to climate data files
+    """
+    all = list(data_dir.glob('climategrid_*.csv'))
+    # all.pop(all.index('climategrid_parameters_description.txt'))
+    f = [file.stem for file in all]
+    all.pop(f.index('climategrid_pixel_metadata'))
+    return all
+
+def find_first_available_year_month(data_dir:Path=DATADIR) -> int:
+    """
+    Find the first available year in the climate data files.
+
+    :param data_dir: Directory where the data is stored
+    :type data_dir: Path
+    :return: First available year as an integer
+    """
+    files = scan_climate_files(data_dir)
+    years = [int(file.stem.split('_')[1][:4]) for file in files]
+    minyear = min(years) if years else None
+    if minyear is not None:
+        logging.info(f"First available year: {minyear}")
+        # find the first month of the first year
+        first_month = min([int(file.stem.split('_')[1][4:6]) for file in files if file.stem.startswith(f'climategrid_{minyear}')])
+        logging.info(f"First available month: {first_month}")
+        return minyear, first_month
+    else:
+        logging.warning("No climate data files found.")
+        return None, None
+
+def find_last_available_year_month(data_dir:Path=DATADIR) -> int:
+    """
+    Find the last available year in the climate data files.
+
+    :param data_dir: Directory where the data is stored
+    :type data_dir: Path
+    :return: Last available year as an integer
+    """
+    files = scan_climate_files(data_dir)
+    years = [int(file.stem.split('_')[1][:4]) for file in files]
+    maxyear = max(years) if years else None
+    if maxyear is not None:
+        logging.info(f"Last available year: {maxyear}")
+        # find the last month of the last year
+        last_month = max([int(file.stem.split('_')[1][4:6]) for file in files if file.stem.startswith(f'climategrid_{maxyear}')])
+        logging.info(f"Last available month: {last_month}")
+        return maxyear, last_month
+    else:
+        logging.warning("No climate data files found.")
+        return None, None
+
+def read_between(data_dir:Path=DATADIR, start_year:int = 1961, start_month:int = 1, end_year:int = 2025, end_month:int = 6) -> pd.DataFrame:
+    """
+    Read climate data files into a single DataFrame.
+
+    :param data_dir: Directory where the data is stored
+    :type data_dir: Path
+    :return: DataFrame containing all climate data
+    """
+
+    _start_year, _start_month = find_first_available_year_month(data_dir)
+    _end_year, _end_month = find_last_available_year_month(data_dir)
+
+    if start_year < _start_year or (start_year == _start_year and start_month < _start_month):
+        logging.warning(f"Start date {start_year}-{start_month} is before the first available data {_start_year}-{_start_month}. Using {_start_year}-{_start_month} instead.")
+        start_year, start_month = _start_year, _start_month
+
+    if end_year > _end_year or (end_year == _end_year and end_month > _end_month):
+        logging.warning(f"End date {end_year}-{end_month} is after the last available data {_end_year}-{_end_month}. Using {_end_year}-{_end_month} instead.")
+        end_year, end_month = _end_year, _end_month
+
+    logging.info(f"Reading data from {start_year}-{start_month} to {end_year}-{end_month}")
+
+    mapped = []
+    for year in range(start_year, end_year+1):
+        for month in range(1, 13):
+            if year == start_year and month < start_month:
+                continue
+            if year == end_year and month > end_month:
+                continue
+            mapped.append((year, month))
+
+    df_list = list(map(lambda ym: read_historical_year_month(ym[0], ym[1], data_dir), mapped))
+
+    return pd.concat(df_list, axis=0)
+
+def read_all_data(data_dir:Path=DATADIR) -> pd.DataFrame:
+    """
+    Read all climate data files into a single DataFrame.
+
+    :param data_dir: Directory where the data is stored
+    :type data_dir: Path
+    :return: DataFrame containing all climate data
+    """
+
+    return read_between(data_dir, 0, 0, 2100, 12)
+
+if __name__ == "__main__":
+
+    print(find_first_available_year_month())
+    print(find_last_available_year_month())
+
+    data = read_all_data()
+    print(data.head())
+
+    pixel_ids, xy = read_pixel_positions()
+    print(f"Pixel IDs: {pixel_ids}")
+    print(f"Pixel XY: {xy}")
+
+    squares = convert_pixels_to_squares(xy)
+
+    xy = np.array(xy)
+
+    fig, ax = plt.subplots(figsize=(6, 6))
+    ax.scatter(xy[:, 0], xy[:, 1], s=1)
+    ax.set_title("Pixel Positions in Lambert 72")
+    ax.set_xlabel("X (Lambert 72)")
+    ax.set_ylabel("Y (Lambert 72)")
+
+    # plot squares
+    for square in squares:
+        (x1, y1), (x2, y2), (x3, y3), (x4, y4) = square
+        ax.plot([x1, x2, x3, x4, x1], [y1, y2, y3, y4, y1], color='red')
+    ax.set_aspect('equal', adjustable='box')
+    plt.show()
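The new module reads monthly IRM extracts named climategrid_YYYYMM.csv and concatenates them over a requested window, clamping the window to what is actually on disk. A hedged usage sketch (the local data directory is hypothetical; signatures as in the code above):

    from pathlib import Path
    from wolfhece.hydrology.climate_data import (read_between, read_pixel_positions,
                                                 convert_pixels_to_squares)

    DATA = Path(r'D:\data\climateGrid')  # hypothetical local copy of the IRM grid

    # daily records for the 2020-2021 hydrological year (clamped to availability)
    df = read_between(DATA, start_year=2020, start_month=10, end_year=2021, end_month=9)
    print(df[['day', 'precip_quantity']].head())

    # pixel centers in Lambert 72 and their surrounding ~5 km squares
    ids, centers = read_pixel_positions(DATA)
    squares, tree = convert_pixels_to_squares(centers)  # also returns the KDTree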
wolfhece/hydrology/constant.py
CHANGED
@@ -22,6 +22,7 @@ tom_2layers_UH = 8
 tom_HBV = 9
 tom_SAC_SMA = 10
 tom_NAM = 11
+tom_SAC_SMA_LROF = 12
 compare_opti = -1
 
 
@@ -36,6 +37,16 @@ tom_netRain_storage = 1
 tom_transf_no = 0 # no transfer model -> use the estimated times
 tom_transf_cst = 1 # transfer model with a constant time
 
+# Type of source/input data
+source_none = -1 # source data missing or unavailable
+source_custom = 0 # source data in a custom format (a mix of the data below)
+source_netcdf = 1 # source data in NetCDF format
+source_IRM = 2 # source data based on the IRM gridded files (daily time step, available over the Ourthe)
+source_municipality_unit_hyeto = 3 # IRM QDF data per municipality
+source_point_measurements = 4 # SPW rain-gauge data
+source_Copernicus = 5 # rainfall or temperature data from the Copernicus project, in netCDF
+source_dist = 6 # gridded rainfall, temperature or evaporation data (polygon + time series for each)
+
 
 ## dictionary of the default indices for each landuse
 DEFAULT_LANDUSE = {}
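These source-type flags are plain module-level integers, so downstream code dispatches on equality. A hedged sketch of such a dispatch (the describe_source helper is hypothetical, not part of the package):

    from wolfhece.hydrology.constant import source_none, source_netcdf, source_IRM

    def describe_source(source_type: int) -> str:
        # hypothetical helper mapping a source flag to a human-readable label
        if source_type == source_none:
            return 'no source data available'
        if source_type == source_netcdf:
            return 'NetCDF source'
        if source_type == source_IRM:
            return 'IRM gridded daily files'
        return 'other source'

    print(describe_source(source_IRM))  # -> 'IRM gridded daily files'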