wolfhece 2.2.23__py3-none-any.whl → 2.2.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1902 @@
1
+ import os
2
+ from packaging import version
3
+ from enum import Enum
4
+ from pathlib import Path
5
+ import logging
6
+ import wx
7
+ import matplotlib.pyplot as plt
8
+ from tqdm import tqdm
9
+ from typing import Literal
10
+ from shapely.geometry import Point, LineString, Polygon
11
+
12
+ from .. import __version__
13
+ from ..PyTranslate import _
14
+ from ..PyGui import MapManager, WolfMapViewer, draw_type, Zones, zone, vector, cloud_vertices, wolfvertex
15
+ from .reporting import RapidReport
16
+ from ..pypolygons_scen import Polygons_Analyze, stored_values_unk, operators, stored_values_coords, stored_values_pos
17
+ from ..multiprojects import MultiProjects, project_type, Project
18
+
19
+
20
+ class Directory_Analysis(Enum):
21
+ PROJECTS = "projets"
22
+ REPORTS = "rapports"
23
+ VECTORS = "vecteurs"
24
+ CLOUDS = "nuages_de_points"
25
+ IMAGES = "images"
26
+ CACHE = "cache"
27
+
28
+ def get_lines_from_ax(ax:plt.Axes) -> list[LineString]:
29
+ """ Get the lines from a Matplotlib Axes object.
30
+
31
+ :param ax: The Matplotlib Axes object.
32
+ :return: A list of LineString objects representing the lines in the Axes.
33
+ """
34
+
35
+ lines = []
36
+ for line in ax.get_lines():
37
+ xdata, ydata = line.get_xdata(), line.get_ydata()
38
+ if len(xdata) > 1 and len(ydata) > 1:
39
+ lines.append(LineString(zip(xdata, ydata)))
40
+ return lines
41
+
42
+ def get_labels_from_ax(ax:plt.Axes) -> list[str]:
43
+ """ Get the labels from a Matplotlib Axes object.
44
+
45
+ :param ax: The Matplotlib Axes object.
46
+ :return: A list of labels from the Axes.
47
+ """
48
+ labels = []
49
+ for line in ax.get_lines():
50
+ label = line.get_label()
51
+ if label and label not in labels:
52
+ labels.append(label)
53
+ return labels
54
+
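+ # Usage sketch (illustrative only, not called by the module): retrieve the plotted curves and
+ # their labels from an existing Matplotlib Axes. The data below are placeholders.
+ #
+ #     fig, ax = plt.subplots()
+ #     ax.plot([0., 50., 100.], [101.2, 100.8, 100.5], label='bed')
+ #     profiles = get_lines_from_ax(ax)    # -> [LineString with 3 points]
+ #     labels = get_labels_from_ax(ax)     # -> ['bed']
+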
55
+ def set_lines_as_black(ax:plt.Axes, label:str | list[str] = None) -> None:
56
+ """ Set the line with the specified label to black in a Matplotlib Axes object.
57
+
58
+ :param ax: The Matplotlib Axes object.
59
+ :param label: The label of the line to set to black.
60
+ """
61
+ for line in ax.get_lines():
62
+ if label is None or line.get_label() == label or (isinstance(label, list) and line.get_label() in label):
63
+ line.set_color('black')
64
+ line.set_linestyle('-')
65
+ line.set_linewidth(1.5)
66
+ line.set_marker('')
67
+ line.set_markersize(0)
68
+ line.set_alpha(1.0)
69
+ logging.info(f"Line with label '{line.get_label()}' set to black.")
70
+         else:
71
+             logging.debug(f"Line with label '{line.get_label()}' left unchanged.")
72
+
73
+ def change_style_in_ax(ax:plt.Axes, styles:dict[str, dict]) -> None:
74
+ """ Change the style of lines in a Matplotlib Axes object.
75
+
76
+ Available style properties:
77
+ - color: The color of the line.
78
+ - linestyle: The style of the line (e.g., '-', '--', '-.', ':').
79
+ - linewidth: The width of the line.
80
+ - marker: The marker style (e.g., 'o', 'x', '^').
81
+ - markersize: The size of the marker.
82
+ - alpha: The transparency of the line (0.0 to 1.0).
83
+ - label: The label for the line in the legend.
84
+
85
+ This function will iterate through all lines in the Axes and apply the styles based on the provided dictionary.
86
+
87
+ :param ax: The Matplotlib Axes object.
88
+     :param styles: A dict where each key is a line label and each value is a dict of style properties.
89
+ :return: None
90
+ """
91
+ for line in ax.get_lines():
92
+ label = line.get_label()
93
+ if label in styles:
94
+ style = styles[label]
95
+ if 'color' in style:
96
+ line.set_color(style['color'])
97
+ if 'linestyle' in style:
98
+ line.set_linestyle(style['linestyle'])
99
+ if 'linewidth' in style:
100
+ line.set_linewidth(style['linewidth'])
101
+ if 'marker' in style:
102
+ line.set_marker(style['marker'])
103
+ if 'markersize' in style:
104
+ line.set_markersize(style['markersize'])
105
+ if 'alpha' in style:
106
+ line.set_alpha(style['alpha'])
107
+ if 'label' in style:
108
+ line.set_label(style['label'])
109
+ # Log the style change
110
+ logging.info(f"Style changed for label '{label}': {style}")
111
+
112
+ else:
113
+ logging.debug(f"No style found for label '{label}'.")
114
+
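+ # Usage sketch (illustrative only): restyle existing curves by label. The labels used as keys
+ # are placeholders for whatever labels the plot actually contains.
+ #
+ #     set_lines_as_black(ax, 'bed')
+ #     change_style_in_ax(ax, {'Q25': {'color': 'blue', 'linestyle': ':'},
+ #                             'Q100': {'color': 'red', 'linewidth': 2., 'label': 'Q100 (new bed)'}})
+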
115
+ def _sanitize_scenario_name(scenario_name: str | tuple[str,str]) -> str:
116
+ """ Sanitize the scenario name to ensure it is a string.
117
+ This function will strip whitespace and ensure the name is a string.
118
+     :param scenario_name: The scenario name to sanitize. If a tuple is given, the first element is used.
119
+ :return: A sanitized string representing the scenario name.
120
+ """
121
+ if isinstance(scenario_name, str):
122
+ scenario_name = scenario_name.strip()
123
+
124
+ elif isinstance(scenario_name, tuple):
125
+ if len(scenario_name) != 2:
126
+ logging.error("Scenario name tuple must contain exactly two elements.")
127
+ raise ValueError("Scenario name tuple must contain exactly two elements.")
128
+
129
+ scenario_name = scenario_name[0].strip()
130
+
131
+ return scenario_name
132
+
133
+ def list_directories(directory: Path) -> list[str]:
134
+ """ List directories in a given path. """
135
+ if not directory.exists():
136
+ logging.error(f"Directory {directory} does not exist.")
137
+ return []
138
+ return [d.name for d in directory.iterdir() if d.is_dir()]
139
+
140
+ def create_a_report(title, author) -> RapidReport:
141
+ """ Create a RapidReport instance.
142
+
143
+ :param title: The title of the report.
144
+ :param author: The author of the report.
145
+ :return: An instance of RapidReport.
146
+ """
147
+ try:
148
+ return RapidReport(main_title=title, author=author)
149
+ except Exception as e:
150
+ logging.error(f"Error creating RapidReport: {e}")
151
+ raise ImportError("Could not create RapidReport instance. Ensure that the RapidReport is properly initialized.")
152
+
153
+ def create_a_wolf_viewer() -> WolfMapViewer:
154
+ """ Create a WolfMapViewer instance.
155
+
156
+ :return: An instance of WolfMapViewer.
157
+ """
158
+
159
+ # check if a wx.App instance already exists
160
+ if not wx.GetApp():
161
+ logging.error(_("You need to create a wx.App instance before creating a WolfMapViewer or to call '%gui wx' in your Jupyter Notebook."))
162
+ return None
163
+ else:
164
+ logging.debug("Using existing wx.App instance.")
165
+
166
+ try:
167
+ maps = MapManager()
168
+ return maps.get_mapviewer()
169
+ except Exception as e:
170
+ logging.error(f"Error creating WolfMapViewer: {e}")
171
+ raise ImportError("Could not create WolfMapViewer instance. Ensure that the MapManager is properly initialized.")
172
+
173
+ def find_scenario_directory(base_directory: Path | str, scenario_name: str) -> Path | None:
174
+ """ Find the directory of a specific scenario within the base directory.
175
+
176
+ :param base_directory: The base directory where the scenarios are located.
177
+ :param scenario_name: The name of the scenario to find.
178
+ :return: The path to the scenario directory if found, None otherwise.
179
+ """
180
+ base_path = Path(base_directory)
181
+
182
+ # search if scenario_name is a directory in the base directory or its subdirectories
183
+ for dirpath, dirnames, filenames in os.walk(base_path):
184
+ if scenario_name in dirnames:
185
+ return Path(dirpath) / scenario_name
186
+
187
+ return None
188
+
189
+ def get_scenarios_directories(base_directory: Path | str, scenario_names:list[str]) -> dict:
190
+ """ Get the directories of all scenarios within the base directory.
191
+
192
+ :param base_directory: The base directory where the scenarios are located.
193
+ :param scenario_names: A list of scenario names to find.
194
+     :return: A dictionary mapping each scenario name to its directory path, or None if not found.
195
+ """
196
+ base_path = Path(base_directory)
197
+ ret = {scenario_name : find_scenario_directory(base_path, scenario_name) for scenario_name in scenario_names}
198
+
199
+     # check if None is in the dict and log it
200
+ for scenario_name, scenario_path in ret.items():
201
+ if scenario_path is None:
202
+ logging.warning(f"Scenario '{scenario_name}' not found in {base_path}.")
203
+ else:
204
+ logging.info(f"Found scenario '{scenario_name}' at {scenario_path}.")
205
+
206
+ return ret
207
+
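+ # Usage sketch (illustrative only): locate scenario sub-directories below a study folder.
+ # The base path and scenario names are placeholders.
+ #
+ #     scen_dirs = get_scenarios_directories('D:/MyStudy', ['reference', 'scenario_A'])
+ #     # -> {'reference': Path('D:/MyStudy/.../reference'), 'scenario_A': Path(...) or None if not found}
+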
208
+ def check_if_scenario_exists(base_directory: Path | str, scenario_name: str) -> bool:
209
+ """ Check if a specific scenario exists within the base directory.
210
+
211
+ :param base_directory: The base directory where the scenarios are located.
212
+ :param scenario_name: The name of the scenario to check.
213
+ :return: True if the scenario exists, False otherwise.
214
+ """
215
+ scenario_path = find_scenario_directory(base_directory, scenario_name)
216
+ if scenario_path is None:
217
+ logging.error(f"Scenario '{scenario_name}' not found in {base_directory}.")
218
+ return False
219
+ else:
220
+ logging.info(f"Scenario '{scenario_name}' found at {scenario_path}.")
221
+ return True
222
+
223
+ def check_if_scenarios_exist(base_directory: Path | str, scenario_names: list[str]) -> bool:
224
+ """ Check if all specified scenarios exist within the base directory.
225
+
226
+ :param base_directory: The base directory where the scenarios are located.
227
+ :param scenario_names: A list of scenario names to check.
228
+ :return: True if all scenarios exist, False otherwise.
229
+ """
230
+ all_exist = True
231
+ for scenario_name in scenario_names:
232
+ if not check_if_scenario_exists(base_directory, scenario_name):
233
+ all_exist = False
234
+ return all_exist
235
+
236
+ def check_analysis_directories(base_directory: Path | str) -> bool:
237
+ """ Check if the necessary directories for analysis exist.
238
+
239
+ :param base_directory: The base directory where the analysis directories are located.
240
+ :return: True if all directories exist, False otherwise.
241
+ """
242
+ ret = True
243
+ base_path = Path(base_directory)
244
+ for directory in Directory_Analysis:
245
+ dir_path = base_path / directory.value
246
+ if not dir_path.exists():
247
+ logging.error(f"Directory {dir_path} does not exist.")
248
+ logging.error(f"Please create the directory {dir_path} manually or call 'create_analysis_directories'.")
249
+ ret = False
250
+ else:
251
+ logging.info(f"Directory {dir_path} exists.")
252
+ return ret
253
+
254
+ def create_analysis_directories(base_directory: Path | str) -> str:
255
+ """ Create the necessary directories for analysis if they do not exist.
256
+
257
+ :param base_directory: The base directory where the analysis directories will be created.
258
+     :return: A message indicating that the directories were created successfully, or describing the error that occurred.
259
+ """
260
+ try:
261
+ base_path = Path(base_directory)
262
+ for directory in Directory_Analysis:
263
+ dir_path = base_path / directory.value
264
+ dir_path.mkdir(parents=True, exist_ok=True)
265
+ return _(f"Directories created successfully in {base_path}.")
266
+ except Exception as e:
267
+ return _(f"Error creating directories: {e}")
268
+
269
+ def get_directories_as_dict(base_directory: Path | str) -> dict:
270
+ """ Get the paths of the analysis directories.
271
+
272
+ :param base_directory: The base directory where the analysis directories are located.
273
+ :return: A dictionary with the paths of the analysis directories.
274
+ """
275
+ base_path = Path(base_directory)
276
+ create_analysis_directories(base_path) # Ensure directories exist
277
+ return {
278
+ Directory_Analysis.PROJECTS: base_path / Directory_Analysis.PROJECTS.value,
279
+ Directory_Analysis.REPORTS: base_path / Directory_Analysis.REPORTS.value,
280
+ Directory_Analysis.VECTORS: base_path / Directory_Analysis.VECTORS.value,
281
+ Directory_Analysis.CLOUDS: base_path / Directory_Analysis.CLOUDS.value,
282
+ Directory_Analysis.IMAGES: base_path / Directory_Analysis.IMAGES.value,
283
+ Directory_Analysis.CACHE: base_path / Directory_Analysis.CACHE.value,
284
+ }
285
+
286
+ def get_directories_as_list(base_directory: Path | str) -> list:
287
+ """ Get the paths of the analysis directories as a list.
288
+
289
+ :param base_directory: The base directory where the analysis directories are located.
290
+ :return: A list with the paths of the analysis directories. Ordered as per Directory_Analysis enum.
291
+ """
292
+ directories = get_directories_as_dict(base_directory)
293
+ return list(directories.values())
294
+
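+ # Usage sketch (illustrative only): prepare the standard analysis tree and retrieve its paths.
+ # 'D:/MyStudy' is a placeholder.
+ #
+ #     print(create_analysis_directories('D:/MyStudy'))        # creates projets/, rapports/, vecteurs/, ...
+ #     dirs = get_directories_as_dict('D:/MyStudy')
+ #     report_path = dirs[Directory_Analysis.REPORTS] / 'analysis_report.docx'
+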
295
+ def _check_version(min_version: str) -> bool:
296
+ """
297
+ Check if the current version is greater than or equal to the minimum version.
298
+
299
+ Args:
300
+ min_version (str): The minimum required version.
302
+
303
+ Returns:
304
+ bool: True if the current version is greater than or equal to the minimum version, False otherwise.
305
+ """
306
+ return version.parse(__version__) >= version.parse(min_version)
307
+
308
+ def check_available_version_on_pypi() -> str:
309
+ """
310
+ Check the latest available version of the package on PyPI.
311
+
312
+ Returns:
313
+ str: The latest version available on PyPI.
314
+ """
315
+ import requests
316
+ from packaging import version
317
+
318
+ url = "https://pypi.org/pypi/wolfhece/json"
319
+ response = requests.get(url)
320
+ if response.status_code == 200:
321
+ data = response.json()
322
+ latest_version = data['info']['version']
323
+ return latest_version
324
+ else:
325
+ raise Exception(f"Failed to fetch version information from PyPI. Status code: {response.status_code}")
326
+
327
+ def can_upgrade_wolfhece() -> bool:
328
+ """
329
+     Check whether a newer version of wolfhece is available on PyPI than the one installed.
333
+
334
+ Returns:
335
+         bool: True if a newer version is available on PyPI, False otherwise.
336
+ """
337
+ latest_version = check_available_version_on_pypi()
338
+     return version.parse(latest_version) > version.parse(__version__)
339
+
340
+ def check_version(min_version: str) -> bool:
341
+ """
342
+ Check if the current version is greater than or equal to the minimum version.
343
+
344
+ Args:
345
+ min_version (str): The minimum required version.
346
+
347
+ Returns:
348
+         bool: True if the installed version is greater than or equal to the minimum version. Raises ImportError otherwise.
349
+ """
350
+ try:
351
+         # Test the version accessible via the PATH
352
+         if _check_version(min_version):
353
+             logging.info(f"wolfhece version {min_version} or higher is installed.")
+             return True
354
+         else:
355
+             logging.error(f"wolfhece version {min_version} or higher is not installed.")
356
+             logging.info(f"Version available on PyPI: {check_available_version_on_pypi()}")
357
+             logging.info(f"Can I upgrade? {can_upgrade_wolfhece()}")
358
+             raise ImportError("Insufficient wolfhece version.")
359
+     except ImportError:
+         raise
+     except Exception:
360
+         logging.error("Error while checking the wolfhece version. Make sure wolfhece is installed and accessible in the PATH.")
361
+         logging.info(f"Version available on PyPI: {check_available_version_on_pypi()}")
362
+         raise ImportError("Error while checking the wolfhece version.")
363
+
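+ # Usage sketch (illustrative only): fail fast in a notebook if the installed wolfhece is too old.
+ # The required version is a placeholder; check_available_version_on_pypi() needs network access.
+ #
+ #     check_version('2.2.25')                     # raises ImportError if the requirement is not met
+ #     print(check_available_version_on_pypi())
+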
364
+ class Analysis_Scenarios():
365
+
366
+ def __init__(self, base_directory: Path | str, storage_directory: Path | str = None, name:str = ''):
367
+
368
+ self.name = name.strip() if name else 'Analysis_Scenarios'
369
+ self.base_directory = Path(base_directory)
370
+ self.storage_directory = storage_directory if storage_directory is not None else self.base_directory
371
+
372
+ self.check_directories()
373
+ self.directories = get_directories_as_dict(self.base_directory)
374
+
375
+         self.scenarios_directories:dict[str, Path]
376
+ self.scenarios_directories = {}
377
+ self.scenarios = []
378
+
379
+ self.current_scenario = None
380
+ self.report = None
381
+ self._report_name = 'analysis_report.docx'
382
+ self._report_saved_once = False
383
+ self.mapviewer = None
384
+ self._background_images = None
385
+
386
+ self._polygons = {}
387
+
388
+ self._return_periods = []
389
+
390
+ self._modifications = {}
391
+
392
+ self._multiprojects = None
393
+ self._cached_date = False
394
+
395
+ self._reference_polygon:Polygons_Analyze = None
396
+ self._landmarks:Zones = None
397
+ self._landmarks_s_label = []
398
+
399
+ self._measures:dict[str, Zones] = {}
400
+ self._measures_zones:dict[str, list[str]] = {}
401
+         self._projected_measures:dict[tuple[str, str, str], list] = {}
402
+
403
+ self._cloud:list[tuple[float, float, str]] = [] # List of tuples (s, z, label) for point clouds
404
+
405
+ self._images = {}
406
+ self._zoom = {}
407
+
408
+ logging.info(f"Analysis directories initialized: {self.directories}")
409
+
410
+ def add_zoom(self, label:str, bounds:tuple[float]) -> None:
411
+ """ Add a zoom level to the analysis.
412
+
413
+ :param label: The label for the zoom level.
414
+ :param bounds: A tuple (xmin, xmax, ymin, ymax) representing the zoom bounds.
415
+ """
416
+ if label in self._zoom:
417
+ logging.warning(f"Zoom level '{label}' already exists. Overwriting.")
418
+
419
+ self._zoom[label] = bounds
420
+ logging.info(f"Zoom memory '{label}' added with bounds {bounds}.")
421
+
422
+ def add_zoom_from_XY(self, label:str, xy1:tuple[float], xy2:tuple[float], zmin:float, zmax:float) -> None:
423
+ """ Add a zoom level to the analysis from X and Y coordinates.
424
+
425
+ :param label: The label for the zoom level.
426
+ :param xy1: A tuple (x1, y1) representing the first point.
427
+ :param xy2: A tuple (x2, y2) representing the second point.
428
+         :param zmin: The lower bound of the vertical (z) axis for this zoom window.
429
+         :param zmax: The upper bound of the vertical (z) axis for this zoom window.
430
+ """
431
+ if self.reference_polygon is None:
432
+ logging.error("No reference polygon set for the analysis. Please set a reference polygon first.")
433
+ raise ValueError("No reference polygon set for the analysis. Please set a reference polygon first.")
434
+
435
+ ls = self.reference_polygon.riverbed.linestring
436
+
437
+ x1,y1 = xy1
438
+ s1 = ls.project(Point(x1, y1))
439
+ x2,y2 = xy2
440
+ s2 = ls.project(Point(x2, y2))
441
+
442
+ if s1 < s2:
443
+ self.add_zoom(label, (s1, s2, zmin, zmax))
444
+ else:
445
+ self.add_zoom(label, (s2, s1, zmin, zmax))
446
+
447
+ logging.info(f"Zoom memory '{label}' added with bounds ({s1}, {s2}, {zmin}, {zmax}).")
448
+
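+     # Usage sketch (illustrative only): register a zoom window along the riverbed from two bank
+     # coordinates. 'analysis' is an Analysis_Scenarios instance with a reference polygon already set;
+     # the coordinates, label and elevations are placeholders.
+     #
+     #     analysis.add_zoom_from_XY('bridge_reach', xy1=(252500., 135800.), xy2=(253100., 136050.),
+     #                               zmin=95., zmax=105.)
+     #     smin, smax, zmin, zmax = analysis.get_zoom('bridge_reach')
+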
449
+ def get_zoom(self, label:str) -> tuple[float]:
450
+ """ Get the zoom level bounds for a specific label.
451
+
452
+ :param label: The label for the zoom level.
453
+ :return: A tuple (xmin, xmax, ymin, ymax) representing the zoom bounds.
454
+ """
455
+ if label not in self._zoom:
456
+ logging.error(f"Zoom level '{label}' does not exist.")
457
+ return (0., self.reference_polygon.riverbed.length2D, 0., 300.)
458
+
459
+ return self._zoom[label]
460
+
461
+ @property
462
+ def measures(self) -> dict:
463
+ """ Return the measures used in the analysis.
464
+
465
+         :return: A dictionary of Zones instances keyed by their idx.
466
+ """
467
+ return self._measures
468
+
469
+ def add_cloud(self, cloud:cloud_vertices | list[tuple[float, float, str]] | str = None, label:str = '') -> None:
470
+ """ Add a cloud of points to the analysis.
471
+
472
+         :param cloud: A cloud_vertices instance, a list of tuples (s, z, label) or a path to a cloud file.
+         :param label: An optional label attached to the added points.
473
+ """
474
+
475
+ if isinstance(cloud, str):
476
+ # If cloud is a string, assume it's a path to a cloud_vertices file
477
+ cloud_path = Path(cloud)
478
+ if not cloud_path.exists():
479
+ # Search in the clouds directory
480
+ cloud_path = self.directories[Directory_Analysis.CLOUDS] / cloud_path
481
+
482
+ if not cloud_path.exists():
483
+ logging.error(f"Cloud file '{cloud}' does not exist.")
484
+ raise FileNotFoundError(f"Cloud file '{cloud}' does not exist.")
485
+
486
+ cloud = cloud_vertices(fname=cloud_path, header=True)
487
+ self.add_cloud(cloud, label)
488
+
489
+ elif isinstance(cloud, cloud_vertices):
490
+ sz_cloud:cloud_vertices
491
+ sz_cloud = cloud.projectontrace(self.reference_polygon.riverbed)
492
+ self._cloud += [(curvert['vertex'].x, curvert['vertex'].y, label) for curvert in sz_cloud.myvertices.values()]
493
+ logging.info(f"Cloud added with {len(cloud.myvertices)} points.")
494
+
495
+ elif isinstance(cloud, list) and all(isinstance(pt, tuple) and len(pt) == 3 for pt in cloud):
496
+ self._cloud.extend(cloud)
497
+ logging.info(f"Cloud added with {len(cloud)} points.")
498
+ else:
499
+ logging.error("Invalid cloud format. Must be a cloud_vertices instance or a list of tuples (s, z, label).")
500
+ raise ValueError("Invalid cloud format. Must be a cloud_vertices instance or a list of tuples (s, z, label).")
501
+
502
+ def add_cloud_point(self, s: float, z: float, label: str = None) -> None:
503
+ """ Add a point to the cloud in the analysis.
504
+
505
+ :param s: The s-coordinate of the point.
506
+ :param z: The z-coordinate of the point.
507
+ :param label: An optional label for the point.
508
+ """
509
+ if self.reference_polygon is None:
510
+ logging.error("No reference polygon set for the analysis. Please set a reference polygon first.")
511
+ raise ValueError("No reference polygon set for the analysis. Please set a reference polygon first.")
512
+
513
+ if label is None:
514
+ label = f"Point {len(self._cloud) + 1}"
515
+
516
+ self._cloud.append((s, z, label))
517
+ logging.info(f"Point added to cloud: ({s}, {z}, '{label}')")
518
+
519
+ def add_cloud_point_XY(self, x: float, y: float, z: float = None, label: str = None) -> None:
520
+ """ Add a point to the cloud in the analysis from X and Y coordinates.
521
+
522
+ :param x: The X coordinate of the point.
523
+ :param y: The Y coordinate of the point.
524
+ :param z: The Z coordinate of the point (optional).
525
+ :param label: An optional label for the point.
526
+ """
527
+ if self.reference_polygon is None:
528
+ logging.error("No reference polygon set for the analysis. Please set a reference polygon first.")
529
+ raise ValueError("No reference polygon set for the analysis. Please set a reference polygon first.")
530
+
531
+ point = Point(x, y)
532
+ ls = self.reference_polygon.riverbed.linestring
533
+ s = ls.project(point)
534
+
535
+ if label is None:
536
+ label = f"Point {len(self._cloud) + 1}"
537
+
538
+ self._cloud.append((s, z, label))
539
+ logging.info(f"Point added to cloud: ({s}, {z}, '{label}')")
540
+
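+     # Usage sketch (illustrative only): attach survey points to the analysis. The file name,
+     # coordinates and labels are placeholders; a reference polygon must already be set so the
+     # points can be projected on the riverbed.
+     #
+     #     analysis.add_cloud('survey_2023.xyz', label='Survey 2023')   # searched in nuages_de_points/ if needed
+     #     analysis.add_cloud_point_XY(252735.2, 135910.8, z=98.43, label='staff gauge')
+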
541
+ def add_measures(self, measures:Zones | str | Path, zones:list[str] = None, style:dict = None) -> None:
542
+ """ Add measures to the analysis.
543
+
544
+         :param measures: A Zones instance or a path to a vector file containing the measures.
+         :param zones: An optional list of zone names to use from the measures. If None, all zones are used.
+         :param style: An optional style dictionary applied when plotting the projected measures.
545
+ """
546
+
547
+ if isinstance(measures, Zones):
548
+ key = measures.idx
549
+             cur_measure = self._measures[key] = measures
550
+ elif isinstance(measures, (str, Path)):
551
+ if not os.path.exists(measures):
552
+
553
+ # Search in the vectors directory
554
+ measures = self.directories[Directory_Analysis.VECTORS] / measures
555
+ if not os.path.exists(measures):
556
+ logging.error(f"Measures file '{measures}' does not exist.")
557
+ raise FileNotFoundError(f"Measures file '{measures}' does not exist.")
558
+
559
+ key = Path(measures).stem
560
+ cur_measure = self._measures[key] = Zones(filename= measures)
561
+
562
+ # check if each zone exists in the measures
563
+ if zones is not None:
564
+ if not isinstance(zones, list):
565
+ logging.error("Zones must be a list of strings.")
566
+ raise ValueError("Zones must be a list of strings.")
567
+
568
+                 for zone_name in zones:
569
+                     if cur_measure[zone_name] is None:
570
+                         logging.error(f"Zone '{zone_name}' not found in the measures.")
571
+                         raise ValueError(f"Zone '{zone_name}' not found in the measures.")
572
+
573
+             self._measures_zones[key] = zones if zones is not None else [curz.myname for curz in cur_measure.myzones]
574
+
575
+ for zone_name in self._measures_zones[key]:
576
+ curz = cur_measure[zone_name]
577
+ for vec in curz.myvectors:
578
+ # create a local key for the projected measures
579
+ lockey = (key, zone_name, vec.myname)
580
+ self._projected_measures[lockey] = [vec.projectontrace(self.reference_polygon.riverbed), style]
581
+
582
+ logging.info(f"Measures added to the analysis: {self._measures[key].idx}")
583
+
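+     # Usage sketch (illustrative only): load measured flood marks stored as a Zones vector file.
+     # The file name and zone name are placeholders; the file is searched in vecteurs/ if the path
+     # does not exist as given.
+     #
+     #     analysis.add_measures('flood_marks_2021.vec', zones=['left_bank'],
+     #                           style={'color': 'green', 'linestyle': ':'})
+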
584
+ def get_landmarks_labels(self) -> list[str]:
585
+ """ Get the names of the landmarks in the analysis.
586
+
587
+ :return: A list of names of the landmarks.
588
+ """
589
+         if not self._landmarks_s_label:
590
+ logging.warning("No landmarks have been added to the analysis.")
591
+ return []
592
+
593
+ return [mark[2] for mark in self._landmarks_s_label]
594
+
595
+ @property
596
+ def reference_polygon(self) -> Polygons_Analyze | None:
597
+ """ Return the reference polygon used in the analysis.
598
+
599
+ :return: An instance of Polygons_Analyze or None if not set.
600
+ """
601
+ return self._reference_polygon
602
+
603
+ @property
604
+ def landmarks(self) -> Zones | None:
605
+ """ Return the landmarks used in the analysis.
606
+
607
+ :return: An instance of Zones or None if not set.
608
+ """
609
+ return self._landmarks
610
+
611
+ def add_landmarks(self, landmarks:Zones | str | Path) -> None:
612
+ """ Add landmarks to the analysis.
613
+
614
+ :param landmarks: A Zones instance or a path to a vector file containing the landmarks.
615
+ """
616
+
617
+ if isinstance(landmarks, Zones):
618
+ self._landmarks = landmarks
619
+ elif isinstance(landmarks, (str, Path)):
620
+ if not os.path.exists(landmarks):
621
+
622
+ # Search in the vectors directory
623
+ landmarks = self.directories[Directory_Analysis.VECTORS] / landmarks
624
+ if not os.path.exists(landmarks):
625
+ logging.error(f"Landmarks file '{landmarks}' does not exist.")
626
+ raise FileNotFoundError(f"Landmarks file '{landmarks}' does not exist.")
627
+
628
+ self._landmarks = Zones(filename= landmarks)
629
+
630
+ if self.reference_polygon is not None:
631
+ # Compute the distance to the reference polygon
632
+ ls = self.reference_polygon.riverbed.linestring
633
+ self._landmarks_s_label = [(ls.project(Point((curvec.myvertices[0].x+curvec.myvertices[-1].x)/2., (curvec.myvertices[0].y+curvec.myvertices[-1].y)/2.)), None, curvec.myname) for curvec in self._landmarks.myzones[0].myvectors]
634
+
635
+ logging.info(f"Landmarks added to the analysis: {self._landmarks.idx}")
636
+
637
+ def add_landmark_from_XY(self, x: float, y: float, label: str, z:float = None) -> None:
638
+ """ Add a landmark to the analysis from X and Y coordinates.
639
+
640
+ :param x: The X coordinate of the landmark.
641
+ :param y: The Y coordinate of the landmark.
642
+         :param label: The label of the landmark.
643
+ :param z: The Z coordinate of the landmark (optional).
644
+ """
645
+
646
+ if self.reference_polygon is None:
647
+ logging.error("No reference polygon set for the analysis. Please set a reference polygon first.")
648
+ raise ValueError("No reference polygon set for the analysis. Please set a reference polygon first.")
649
+
650
+ point = Point(x, y)
651
+ ls = self.reference_polygon.riverbed.linestring
652
+ self._landmarks_s_label.append((ls.project(point), z, label))
653
+ logging.info(f"Landmark '{label}' added at coordinates ({x}, {y}).")
654
+
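+     # Usage sketch (illustrative only): landmarks can be loaded from a Zones file or added one by one.
+     # The file name, coordinates and label are placeholders.
+     #
+     #     analysis.add_landmarks('landmarks.vecz')                  # searched in vecteurs/ if needed
+     #     analysis.add_landmark_from_XY(252900., 135950., label='railway bridge')
+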
655
+ def update_landmark(self, label: str, s_xy:float |tuple[float] = None, z: float = None) -> None:
656
+ """ Update a landmark in the analysis. """
657
+ if self._landmarks is None:
658
+ logging.error("No landmarks have been added to the analysis.")
659
+ raise ValueError("No landmarks have been added to the analysis.")
660
+
661
+ if label not in self.get_landmarks_labels():
662
+ logging.error(f"Landmark '{label}' not found in the analysis.")
663
+ raise ValueError(f"Landmark '{label}' not found in the analysis.")
664
+
665
+ for i, mark in enumerate(self._landmarks_s_label):
666
+ if mark[2] == label:
667
+
668
+                 z = z if z is not None else mark[1]
669
+
670
+ if s_xy is not None:
671
+ if isinstance(s_xy, tuple):
672
+ if len(s_xy) != 2:
673
+ logging.error("s_xy must be a tuple of (s, xy) or a single float value.")
674
+ raise ValueError("s_xy must be a tuple of (s, xy) or a single float value.")
675
+ x, y = s_xy
676
+ pt = Point(x, y)
677
+ ls = self.reference_polygon.riverbed.linestring
678
+ s_xy = ls.project(pt)
679
+
680
+ if isinstance(s_xy, float):
681
+ if s_xy < 0 or s_xy > self.reference_polygon.riverbed.length:
682
+ logging.error(f"s_xy value {s_xy} is out of bounds for the reference polygon.")
683
+ raise ValueError(f"s_xy value {s_xy} is out of bounds for the reference polygon.")
684
+
685
+ self._landmarks_s_label[i] = (s_xy, z, label)
686
+
687
+ def plot_cloud(self, ax: plt.Axes, bounds:tuple[float]) -> plt.Axes:
688
+
689
+ xmin, xmax, ymin, ymax = bounds
690
+
691
+ used_cloud = [(s, z, label) for s, z, label in self._cloud if s >= xmin and s <= xmax]
692
+ i=0
693
+ for s, z, label in used_cloud:
694
+ ax.scatter(s, z, label = label if i == 0 else "", c='black', marker='o', s=10, alpha=0.5)
695
+ i += 1
696
+
697
+ return ax
698
+
699
+ def plot_measures(self, ax:plt.Axes, bounds:tuple[float], style:dict = None) -> plt.Axes:
700
+
701
+ xmin, xmax, ymin, ymax = bounds
702
+ i=0
703
+ for key, measure in self._projected_measures.items():
704
+ vec = measure[0]
705
+             cur_style = measure[1] if measure[1] is not None else style
706
+ sz = vec.xy
707
+ s = sz[:,0]
708
+ z = sz[:,1]
709
+
710
+ color = 'grey'
711
+ linestyle = '--'
712
+
713
+             if cur_style is not None:
714
+                 color = cur_style['color'] if 'color' in cur_style else 'grey'
715
+                 linestyle = cur_style['linestyle'] if 'linestyle' in cur_style else '--'
716
+
717
+ portion = (s > xmin) #& (s < xmax)
718
+ if i==0:
719
+ ax.plot(s[portion], z[portion], color=color, linestyle=linestyle, label = _('Measures') if i == 0 else "")
720
+ else:
721
+ ax.plot(s[portion], z[portion], color=color, linestyle=linestyle)
722
+             i += 1
+
+         return ax
723
+
724
+ def plot_landmarks(self, ax:plt.Axes, bounds:tuple[float], style:dict = None) -> plt.Axes:
725
+ """ Trace les repères sur un axe Matplotlib
726
+
727
+ :param ax: axe Matplotlib
728
+ :param bounds: tuple (xmin, xmax, ymin, ymax) for the plot limits
729
+ :param style: Optional style dictionary for the landmarks.
730
+ """
731
+
732
+ xmin, xmax, ymin, ymax = bounds
733
+
734
+ i=0
735
+ for mark in self._landmarks_s_label:
736
+ s, z, label = mark
737
+
738
+ if style is None:
739
+ color = 'black'
740
+ linestyle = '--'
741
+ linewidth = 0.7
742
+ marker = 'x'
743
+ else:
744
+ color = style['color'] if 'color' in style else 'black'
745
+ linestyle = style['linestyle'] if 'linestyle' in style else '--'
746
+ linewidth = style['linewidth'] if 'linewidth' in style else 0.7
747
+ marker = style['marker'] if 'marker' in style else 'x'
748
+
749
+ if s < xmin or s > xmax:
750
+ continue
751
+ if z is None:
752
+                 ax.vlines(s, ymin, ymax, color=color, linewidth=linewidth, linestyle=linestyle, label=_('Landmarks') if i == 0 else "")
753
+ ax.text(s, ymin, label, rotation=30)
754
+ else:
755
+                 ax.scatter(s, z, color=color, marker=marker, label=_('Landmarks') if i == 0 else "")
756
+ i += 1
757
+
758
+ return ax
759
+
760
+ def plot_waterlines(self, scenario:str | tuple[str, str] | list[str] | list[tuple[str, str]],
761
+ bounds:tuple[float] | str,
762
+ operator:operators = operators.MEDIAN,
763
+ plot_annex:bool = True,
764
+ save:bool = False) -> tuple[plt.Figure, plt.Axes]:
765
+ """ Plot the waterlines for a specific scenario.
766
+
767
+ :param scenario: The name of the scenario to plot waterlines for or a list of scenarios for comparison.
768
+ :param bounds: A tuple (xmin, xmax, ymin, ymax) representing the zoom bounds or a string label for a zoom level.
769
+ :param operator: The operator to apply on the waterlines.
770
+         :param plot_annex: If True, plot the cloud of points, measures, and landmarks.
+         :param save: If True, save the plot as an image file.
771
+ """
772
+
773
+ if isinstance(bounds, str):
774
+ # If bounds is a string, assume it's a label for a zoom level
775
+ bounds = self.get_zoom(bounds)
776
+ if bounds is None:
777
+ logging.error(f"Zoom level '{bounds}' does not exist.")
778
+ raise ValueError(f"Zoom level '{bounds}' does not exist.")
779
+ elif isinstance(bounds, tuple):
780
+ if len(bounds) != 4:
781
+ logging.error("Bounds must be a tuple of (xmin, xmax, ymin, ymax).")
782
+ raise ValueError("Bounds must be a tuple of (xmin, xmax, ymin, ymax).")
783
+ xmin, xmax, ymin, ymax = bounds
784
+
785
+ if isinstance(scenario, (str, tuple)):
786
+
787
+ scenario = _sanitize_scenario_name(scenario)
788
+
789
+ fig,ax = plt.subplots(1,1)
790
+
791
+             self.get_polygon(scenario).plot_waterline((fig,ax), which_group=scenario, operator=operator)
792
+
793
+ filename = self.directories[Directory_Analysis.IMAGES] / f"{self.name}_{scenario}_{str(xmin)}_{str(xmax)}_waterlines.png"
794
+
795
+ elif isinstance(scenario, list):
796
+ # We want to compare multiple scenarios
797
+ if len(scenario) < 2:
798
+ logging.error("At least two scenarios are required to compare waterlines.")
799
+ raise ValueError("At least two scenarios are required to compare waterlines.")
800
+
801
+ ref, sim = scenario[0]
802
+ ref = _sanitize_scenario_name(ref)
803
+
804
+ # plot topography / bed elevation for the reference scenario
805
+ s, z = self.get_polygon(ref).get_s_values(stored_values_unk.TOPOGRAPHY, which_group=ref, operator=operator, which_sim=sim)
806
+ fig, ax = plt.subplots(1, 1)
807
+ ax.plot(s, z, label=ref, color='black', linestyle='-', linewidth=2)
808
+
809
+ # plot water surface elevation for the reference scenario
810
+ s, z = self.get_polygon(ref).get_s_values(stored_values_unk.WATERLEVEL, which_group=ref, operator=operator, which_sim=sim)
811
+ ax.plot(s, z, label=f"{ref} - {stored_values_unk.WATERLEVEL.value}", color='blue', linestyle='-', linewidth=2)
812
+
813
+ # plot topography / bed elevation for the simulation scenarios
814
+ for cur_scenario in scenario[1:]:
815
+ scen_name, sim_name = cur_scenario
816
+ scen_name = _sanitize_scenario_name(scen_name)
817
+ s, z = self.get_polygon(scen_name).get_s_values(stored_values_unk.TOPOGRAPHY, which_group=scen_name, operator=operator, which_sim=sim_name)
818
+ ax.plot(s, z, label=f"{scen_name} - {_('Bathymetry')}", linestyle='--', linewidth=1.5)
819
+
820
+ # plot water surface elevation for the simulation scenarios
821
+ for cur_scenario in scenario[1:]:
822
+ scen_name, sim_name = cur_scenario
823
+ scen_name = _sanitize_scenario_name(scen_name)
824
+ s, z = self.get_polygon(scen_name).get_s_values(stored_values_unk.WATERLEVEL, which_group=scen_name, operator=operator, which_sim=sim_name)
825
+ ax.plot(s, z, label=f"{scen_name} - {sim_name}", linestyle='--', linewidth=1.5)
826
+
827
+ filename = self.directories[Directory_Analysis.IMAGES] / f"{self.name}_{ref}_{str(xmin)}_{str(xmax)}_waterlines_comparison.png"
828
+
829
+         fig.set_size_inches(20, 10)
830
+ ax.legend()
831
+ #zoomA
832
+ ax.set_xlim(xmin, xmax)
833
+ ax.set_ylim(ymin, ymax)
834
+ ax.grid()
835
+
836
+ if plot_annex:
837
+ # Plot the cloud of points
838
+ self.plot_cloud(ax, bounds)
839
+
840
+ # Plot the measures
841
+ self.plot_measures(ax, bounds)
842
+
843
+ # Plot the landmarks
844
+ self.plot_landmarks(ax, bounds)
845
+
846
+ fig.tight_layout()
847
+
848
+ if save:
849
+ self.save_image(filename, fig=fig, format='png')
850
+ logging.info(f"Waterlines plot saved as {filename}")
851
+
852
+ return fig, ax
853
+
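+     # Usage sketch (illustrative only): compare the reference waterline with an alternative scenario
+     # on a stored zoom window. Scenario, simulation and zoom names are placeholders and assume the
+     # corresponding results have been loaded and extracted beforehand.
+     #
+     #     fig, ax = analysis.plot_waterlines([('reference', 'Q100'), ('scenario_A', 'Q100')],
+     #                                        bounds='bridge_reach', save=True)
+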
854
+ def plot_waterheads(self, scenario:str | tuple[str, str],
855
+ bounds:tuple[float] | str,
856
+ operator:operators = operators.MEDIAN,
857
+ plot_annex:bool = True,
858
+ save:bool = False) -> tuple[plt.Figure, plt.Axes]:
859
+ """ Plot the heads for a specific scenario.
860
+
861
+ :param scenario: The name of the scenario to plot heads for or a list of scenarios for comparison.
862
+ :param bounds: A tuple (xmin, xmax, ymin, ymax) representing the zoom bounds or a string label for a zoom level.
863
+ :param operator: The operator to apply on the heads.
864
+ :param plot_annex: If True, plot the cloud of points, measures, and landmarks.
865
+ :param save: If True, save the plot as an image file.
866
+ """
867
+ if isinstance(bounds, str):
868
+ # If bounds is a string, assume it's a label for a zoom level
869
+ bounds = self.get_zoom(bounds)
870
+ if bounds is None:
871
+ logging.error(f"Zoom level '{bounds}' does not exist.")
872
+ raise ValueError(f"Zoom level '{bounds}' does not exist.")
873
+ elif isinstance(bounds, tuple):
874
+ if len(bounds) != 4:
875
+ logging.error("Bounds must be a tuple of (xmin, xmax, ymin, ymax).")
876
+ raise ValueError("Bounds must be a tuple of (xmin, xmax, ymin, ymax).")
877
+ xmin, xmax, ymin, ymax = bounds
878
+
879
+ if isinstance(scenario, (str, tuple)):
880
+
881
+ scenario = _sanitize_scenario_name(scenario)
882
+ fig, ax = plt.subplots(1, 1)
883
+             self.get_polygon(scenario).plot_waterhead((fig, ax), which_group=scenario, operator=operator)
884
+ filename = self.directories[Directory_Analysis.IMAGES] / f"{self.name}_{scenario}_{str(xmin)}_{str(xmax)}_heads.png"
885
+ elif isinstance(scenario, list):
886
+
887
+ ref, sim = scenario[0]
888
+ ref = _sanitize_scenario_name(ref)
889
+ fig, ax = plt.subplots(1, 1)
890
+ # plot heads for the reference scenario
891
+ s, z = self.get_polygon(ref).get_s_values(stored_values_unk.HEAD, which_group=ref, operator=operator, which_sim=sim)
892
+ ax.plot(s, z, label=f"{ref} - {stored_values_unk.HEAD.value}", color='blue', linestyle='-', linewidth=2)
893
+ # plot heads for the simulation scenarios
894
+ for cur_scenario in scenario[1:]:
895
+ scen_name, sim_name = cur_scenario
896
+ scen_name = _sanitize_scenario_name(scen_name)
897
+ s, z = self.get_polygon(scen_name).get_s_values(stored_values_unk.HEAD, which_group=scen_name, operator=operator, which_sim=sim_name)
898
+ ax.plot(s, z, label=f"{scen_name} - {sim_name}", linestyle='--', linewidth=1.5)
899
+ filename = self.directories[Directory_Analysis.IMAGES] / f"{self.name}_{ref}_{str(xmin)}_{str(xmax)}_heads_comparison.png"
900
+
901
+ fig.set_size_inches(20, 10)
902
+ ax.legend()
903
+ #zoomA
904
+ ax.set_xlim(xmin, xmax)
905
+ ax.set_ylim(ymin, ymax)
906
+ ax.grid()
907
+ if plot_annex:
908
+ # Plot the cloud of points
909
+ self.plot_cloud(ax, bounds)
910
+
911
+ # Plot the measures
912
+ self.plot_measures(ax, bounds)
913
+
914
+ # Plot the landmarks
915
+ self.plot_landmarks(ax, bounds)
916
+ fig.tight_layout()
917
+ if save:
918
+ self.save_image(filename, fig=fig, format='png')
919
+ logging.info(f"Heads plot saved as {filename}")
920
+
921
+ return fig, ax
922
+
923
+ def plot_Froude(self, scenario:str | tuple[str, str] | list[str] | list[tuple[str, str]],
924
+ bounds:tuple[float] | str,
925
+ operator:operators = operators.MEDIAN,
926
+ plot_annex:bool = True,
927
+ save:bool = False) -> tuple[plt.Figure, plt.Axes]:
928
+ """ Plot the Froude for a specific scenario.
929
+
930
+ :param scenario: The name of the scenario to plot waterlines for or a list of scenarios for comparison.
931
+ :param bounds: A tuple (xmin, xmax, ymin, ymax) representing the zoom bounds or a string label for a zoom level.
932
+ :param operator: The operator to apply on the waterlines.
933
+ :param save: If True, save the plot as an image file.
934
+ """
935
+
936
+ if isinstance(bounds, str):
937
+ # If bounds is a string, assume it's a label for a zoom level
938
+ bounds = self.get_zoom(bounds)
939
+ if bounds is None:
940
+ logging.error(f"Zoom level '{bounds}' does not exist.")
941
+ raise ValueError(f"Zoom level '{bounds}' does not exist.")
942
+ elif isinstance(bounds, tuple):
943
+ if len(bounds) != 4:
944
+ logging.error("Bounds must be a tuple of (xmin, xmax, ymin, ymax).")
945
+ raise ValueError("Bounds must be a tuple of (xmin, xmax, ymin, ymax).")
946
+ xmin, xmax, ymin, ymax = bounds
947
+
948
+ if isinstance(scenario, (str, tuple)):
949
+
950
+ scenario = _sanitize_scenario_name(scenario)
951
+
952
+ fig,ax = plt.subplots(1,1)
953
+ # plot Froude number for the scenario
954
+
955
+ for sim in self.list_sims_in_polygons(scenario):
956
+ # plot Froude number for the reference scenario
957
+ s, z = self.get_polygon(scenario).get_s_values(stored_values_unk.FROUDE, which_group=scenario, operator=operator, which_sim=sim)
958
+ ax.plot(s, z, label=f"{scenario} - {stored_values_unk.FROUDE.value}", linestyle='-', linewidth=1.5)
959
+
960
+ filename = self.directories[Directory_Analysis.IMAGES] / f"{self.name}_{scenario}_{str(xmin)}_{str(xmax)}_Froude.png"
961
+
962
+ elif isinstance(scenario, list):
963
+
964
+ # We want to compare multiple scenarios
965
+ if len(scenario) < 2:
966
+ logging.error("At least two scenarios are required to compare waterlines.")
967
+ raise ValueError("At least two scenarios are required to compare waterlines.")
968
+
969
+ fig,ax = plt.subplots(1,1)
970
+
971
+ ref, sim = scenario[0]
972
+ ref = _sanitize_scenario_name(ref)
973
+
974
+ # plot water surface elevation for the reference scenario
975
+ s, z = self.get_polygon(ref).get_s_values(stored_values_unk.FROUDE, which_group=ref, operator=operator, which_sim=sim)
976
+ ax.plot(s, z, label=f"{ref} - {stored_values_unk.FROUDE.value}", color='blue', linestyle='-', linewidth=2)
977
+
978
+ # plot water surface elevation for the simulation scenarios
979
+ for cur_scenario in scenario[1:]:
980
+ scen_name, sim_name = cur_scenario
981
+ scen_name = _sanitize_scenario_name(scen_name)
982
+ s, z = self.get_polygon(scen_name).get_s_values(stored_values_unk.FROUDE, which_group=scen_name, operator=operator, which_sim=sim_name)
983
+ ax.plot(s, z, label=f"{scen_name} - {sim_name}", linestyle='--', linewidth=1.5)
984
+
985
+ filename = self.directories[Directory_Analysis.IMAGES] / f"{self.name}_{ref}_{str(xmin)}_{str(xmax)}_Froude_comparison.png"
986
+
987
+         fig.set_size_inches(20, 10)
988
+ ax.legend()
989
+ #zoomA
990
+ ax.set_xlim(xmin, xmax)
991
+ ax.set_ylim(0, 2.)
992
+ ax.plot([xmin, xmax], [1, 1], color='black', linestyle='--', label=_('Froude = 1'))
993
+ ax.grid()
994
+
995
+ if plot_annex:
996
+ # Plot the landmarks
997
+ self.plot_landmarks(ax, (bounds[0], bounds[1], 0, 2.))
998
+
999
+ fig.suptitle(_('Froude Number'), fontsize=16)
1000
+ fig.tight_layout()
1001
+
1002
+ if save:
1003
+ self.save_image(filename, fig=fig, format='png')
1004
+ logging.info(f"Waterlines plot saved as {filename}")
1005
+
1006
+ return fig, ax
1007
+
1008
+ def save_image(self, filename: str, fig: plt.Figure = None, dpi: int = 300, format:Literal['png', 'pdf', 'svg'] = 'png') -> None:
1009
+ """ Save the current figure as an image file.
1010
+
1011
+ :param filename: The name of the file to save the image to.
1012
+ :param fig: The figure to save. If None, uses the current active figure.
1013
+ :param dpi: The resolution of the saved image in dots per inch.
1014
+ :param format: The format of the saved image (png, pdf, svg). Default is 'png'.
1015
+ """
1016
+ if fig is None:
1017
+ fig = plt.gcf()
1018
+
1019
+ if not format in ['png', 'pdf', 'svg']:
1020
+ logging.error(f"Format '{format}' is not supported. Supported formats are 'png', 'pdf', and 'svg'.")
1021
+ raise ValueError(f"Format '{format}' is not supported. Supported formats are 'png', 'pdf', and 'svg'.")
1022
+
1023
+ fig.savefig(filename, dpi=dpi, format=format)
1024
+
1025
+ self._images[filename] = filename
1026
+
1027
+ logging.info(f"Image saved as {filename} with dpi={dpi} and format={format}.")
1028
+
1029
+ def export_values_as(self, scenario: str | tuple[str, str] = None,
1030
+ simulation_key:list[str] = None,
1031
+ which_values:list[stored_values_unk] = [stored_values_unk.TOPOGRAPHY,
1032
+ stored_values_unk.WATERDEPTH,
1033
+ stored_values_unk.WATERLEVEL,
1034
+ stored_values_unk.HEAD,
1035
+ stored_values_coords.X,
1036
+ stored_values_coords.Y],
1037
+ operator:operators = operators.MEDIAN,
1038
+ filename: str = None,
1039
+ format:Literal['xlsx', 'csv'] = 'xlsx') -> None:
1040
+ """ Export values from polygons for a specific scenario to a file.
1041
+
1042
+ :param scenario: The name of the scenario to export values for. If None, exports values for all scenarios.
1043
+ :param simulation_key: The key of the simulation to export values for. If None, exports values for all simulations.
1044
+ :param which_values: The type of values to export from the polygons.
1045
+ :param operator: The operator to apply on the values extracted from the polygons.
1046
+ :param filename: The name of the file to export values to. If None, a default name will be used.
1047
+ :param format: The format of the file to export values to (csv or xlsx). Default is 'xlsx'.
1048
+ """
1049
+
1050
+ if not format in ['xlsx', 'csv']:
1051
+ logging.error(f"Format '{format}' is not supported. Supported formats are 'xlsx' and 'csv'.")
1052
+ raise ValueError(f"Format '{format}' is not supported. Supported formats are 'xlsx' and 'csv'.")
1053
+
1054
+ scenario = _sanitize_scenario_name(scenario)
1055
+
1056
+ if filename is None:
1057
+ filename = f"{self.name}_{scenario}_values"
1058
+ else:
1059
+ #remove suffix if it exists
1060
+ filename = filename.removesuffix(f'.{format}') if filename.endswith(f'.{format}') else filename
1061
+
1062
+ if scenario is None:
1063
+ # Export values for all scenarios
1064
+ for key in self._polygons.keys():
1065
+ self.export_values_as(key, simulation_key, which_values, operator, filename=f"{self.name}_{key}_values.{format}", format=format)
1066
+ return
1067
+
1068
+ if scenario not in self._polygons:
1069
+ logging.error(f"Scenario '{scenario}' not found in the analysis.")
1070
+ raise ValueError(f"Scenario '{scenario}' not found in the analysis.")
1071
+
1072
+
1073
+ if simulation_key is None:
1074
+ # Export values for all simulations
1075
+ simulation_key = self.list_sims_in_polygons(scenario)
1076
+ if not simulation_key:
1077
+ logging.error(f"No simulations found for scenario '{scenario}'.")
1078
+ raise ValueError(f"No simulations found for scenario '{scenario}'.")
1079
+
1080
+ poly = self.get_polygon(scenario)
1081
+ for sim_key in simulation_key:
1082
+ poly.export_as((self.directories[Directory_Analysis.CACHE] / (filename + '_' + sim_key)).with_suffix(f'.{format}'), which_values, which_group=scenario, operator=operator, which_sim=sim_key)
1083
+ logging.info(f"Values exported for simulation '{sim_key}' in scenario '{scenario}' to {filename}_{sim_key}.{format}")
1084
+
1085
+ elif isinstance(simulation_key, list):
1086
+ # Export values for a list of simulations
1087
+ for sim_key in simulation_key:
1088
+ if not sim_key in self.list_sims_in_polygons(scenario):
1089
+ logging.error(f"Simulation key '{sim_key}' not found in the scenario '{scenario}'.")
1090
+ raise ValueError(f"Simulation key '{sim_key}' not found in the scenario '{scenario}'.")
1091
+
1092
+ poly = self.get_polygon(scenario)
1093
+ for sim_key in simulation_key:
1094
+ poly.export_as((self.directories[Directory_Analysis.CACHE] / (filename + '_' + sim_key)).with_suffix(f'.{format}'), which_values, which_group=scenario, operator=operator, which_sim=sim_key)
1095
+ logging.info(f"Values exported for simulation '{sim_key}' in scenario '{scenario}' to {filename}_{sim_key}.{format}")
1096
+
1097
+ else:
1098
+ if not simulation_key in self.list_sims_in_polygons(scenario):
1099
+ logging.error(f"Simulation key '{simulation_key}' not found in the scenario '{scenario}'.")
1100
+ raise ValueError(f"Simulation key '{simulation_key}' not found in the scenario '{scenario}'.")
1101
+
1102
+ self.get_polygon(scenario).export_as((self.directories[Directory_Analysis.CACHE] / (filename + '_' + simulation_key)).with_suffix(f'.{format}'), which_values, which_group= scenario, operator= operator, which_sim=simulation_key)
1103
+ logging.info(f"Values exported for simulation '{simulation_key}' in scenario '{scenario}' to {filename}_{simulation_key}.{format}")
1104
+
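+     # Usage sketch (illustrative only): export the median values extracted in the polygons to a
+     # spreadsheet written in the cache directory. Scenario and simulation names are placeholders.
+     #
+     #     analysis.export_values_as('scenario_A', simulation_key=['Q100'], format='xlsx')
+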
1105
+ def get_values_from_polygons(self, scenario: str | tuple[str, str] = None, which_value:stored_values_unk = stored_values_unk.HEAD, which_operator:operators = operators.MEDIAN) -> dict:
1106
+ """ Get values from polygons for a specific scenario.
1107
+
1108
+ :param scenario: The name of the scenario to get values for. If None, gets values for all scenarios.
1109
+ :param which_value: The type of value to extract from the polygons.
1110
+ :param which_operator: The operator to apply on the values extracted from the polygons.
1111
+ :return: A dictionary with the values extracted from the polygons.
1112
+ """
1113
+
1114
+ scenario = _sanitize_scenario_name(scenario)
1115
+
1116
+ if scenario is None:
1117
+ # Get values for all scenarios
1118
+ return {key: self.get_values_from_polygons(key, which_value, which_operator) for key in self._polygons.keys()}
1119
+
1120
+ if scenario not in self._polygons:
1121
+ logging.error(f"Scenario '{scenario}' not found in the analysis.")
1122
+ raise ValueError(f"Scenario '{scenario}' not found in the analysis.")
1123
+
1124
+ poly = self.get_polygon(scenario)
1125
+ return poly.get_river_values_op(which_value, which_group=scenario, operator= which_operator)
1126
+
1127
+ def list_groups_in_polygons(self, scenario: str | tuple[str, str]) -> list[str]:
1128
+ """ List the groups in the polygons used in the analysis.
1129
+ :param scenario: The name of the scenario to list groups for. If None, lists groups for all scenarios.
1130
+ """
1131
+ scenario = _sanitize_scenario_name(scenario)
1132
+
1133
+ return self._polygons[scenario][0].list_groups() if scenario in self._polygons else []
1134
+
1135
+ def list_sims_in_polygons(self, scenario: str | tuple[str, str]) -> list[str]:
1136
+ """ List the simulations in the polygons used in the analysis.
1137
+ :param scenario: The name of the scenario to list simulations for. If None, lists simulations for all scenarios.
1138
+ """
1139
+ scenario = _sanitize_scenario_name(scenario)
1140
+
1141
+ if scenario not in self._polygons:
1142
+ logging.error(f"Scenario '{scenario}' not found in the analysis.")
1143
+ raise ValueError(f"Scenario '{scenario}' not found in the analysis.")
1144
+
1145
+ ret = []
1146
+ dct= self._polygons[scenario][0].list_sims()
1147
+ for key, value in dct.items():
1148
+ ret.extend(value)
1149
+ return ret
1150
+
1151
+ def list_sims_in_all_polygons(self) -> dict:
1152
+ """ List the simulations in all polygons used in the analysis.
1153
+
1154
+ :return: A dictionary where keys are scenario names and values are lists of simulations.
1155
+ """
1156
+ ret = {}
1157
+ # Append the simulations for each scenario
1158
+ for scenario, polygons in self._polygons.items():
1159
+ if not isinstance(polygons[0], Polygons_Analyze):
1160
+ logging.error(f"Polygons for scenario '{scenario}' are not instances of Polygons_Analyze.")
1161
+ raise TypeError(f"Polygons for scenario '{scenario}' are not instances of Polygons_Analyze.")
1162
+
1163
+ dct = polygons[0].list_sims()
1164
+ for key, value in dct.items():
1165
+ ret[key] = value
1166
+ return ret
1167
+
1168
+ def cache_data_to_disk(self) -> None:
1169
+ """ Enable or disable caching of extracted data from polygons.
1170
+
1171
+ :param cache: If True, enable caching. If False, disable caching.
1172
+ """
1173
+ for key, polygons in self._polygons.items():
1174
+ if not isinstance(polygons[0], Polygons_Analyze):
1175
+ logging.error(f"Polygons for scenario '{key}' are not instances of Polygons_Analyze.")
1176
+ raise TypeError(f"Polygons for scenario '{key}' are not instances of Polygons_Analyze.")
1177
+
1178
+ if isinstance(key, tuple):
1179
+ key = '_'.join(key)
1180
+
1181
+ polygons[0].cache_data((self.directories[Directory_Analysis.CACHE] / (self.name + '_' + key)).with_suffix('.json'))
1182
+
1183
+ def load_cached_data(self) -> None:
1184
+ """ Load cached data from polygons if available.
1185
+
1186
+ This will load the cached data from the polygons used in the analysis.
1187
+ """
1188
+ for key, polygons in self._polygons.items():
1189
+ if not isinstance(polygons[0], Polygons_Analyze):
1190
+ logging.error(f"Polygons for scenario '{key}' are not instances of Polygons_Analyze.")
1191
+ raise TypeError(f"Polygons for scenario '{key}' are not instances of Polygons_Analyze.")
1192
+
1193
+ if isinstance(key, tuple):
1194
+ key = '_'.join(key)
1195
+
1196
+ cache_file = (self.directories[Directory_Analysis.CACHE] / (self.name + '_' + key)).with_suffix('.json')
1197
+ polygons[0].load_data(cache_file)
1198
+
1199
+     def extract_data_from_polygons(self) -> None:
1200
+ """ Extract data from polygons used in the analysis.
1201
+
1202
+ Apply on all projects in the analysis.
1203
+ """
1204
+
1205
+ if len(self._polygons) == 0:
1206
+ logging.error("No polygons have been set for the analysis.")
1207
+ raise ValueError("No polygons have been set for the analysis.")
1208
+ if self._multiprojects is None:
1209
+ logging.error("MultiProjects instance is not created. Please create a MultiProjects instance first.")
1210
+ raise ValueError("MultiProjects instance is not created. Please create a MultiProjects instance first.")
1211
+
1212
+ if not isinstance(self._multiprojects, MultiProjects):
1213
+ logging.error("The _multiprojects attribute is not an instance of MultiProjects.")
1214
+ raise TypeError("The _multiprojects attribute is not an instance of MultiProjects.")
1215
+
1216
+ # Polygon keys must be the same as the project names in the MultiProjects instance
1217
+ for key, polygons in self._polygons.items():
1218
+ sims = self._multiprojects.get_simulations_dict(key)
1219
+ if len(sims) == 0:
1220
+ logging.error(f"No simulations found for scenario '{key}'. Please load simulations first.")
1221
+ continue
1222
+ logging.info(f"Extracting data from polygons for scenario '{key}' with {len(sims[list(sims.keys())[0]])} simulations.")
1223
+ polygons[0].find_values_inside_parts(sims)
1224
+ logging.info("Data extraction from polygons completed.")
1225
+
1226
+ def load_results_for_all(self, epsilon:float = 0.001, verbose:bool = True):
1227
+ """ Load results for all projects in the analysis.
1228
+
1229
+         :param epsilon: The water-depth threshold above which cells are considered wet.
+         :param verbose: If True, display progress information while loading.
1230
+ """
1231
+
1232
+ if self._multiprojects is None:
1233
+ logging.error("MultiProjects instance is not created. Please create a MultiProjects instance first.")
1234
+ raise ValueError("MultiProjects instance is not created. Please create a MultiProjects instance first.")
1235
+
1236
+ self._multiprojects.load_simulations(epsilon= epsilon, verbose= verbose)
1237
+
1238
+ def add_projects(self, projects:list[tuple[str | tuple, str]]) -> None:
1239
+ """ Create a MultiProjects instance for managing all scenario results.
1240
+ """
1241
+ self._multiprojects = MultiProjects(self.directories[Directory_Analysis.PROJECTS])
1242
+
1243
+ # check if projects exist in the projects directory
1244
+ if not isinstance(projects, list):
1245
+ logging.error("Projects must be a list of tuples (scenario_name, project_name).")
1246
+ raise ValueError("Projects must be a list of tuples (scenario_name, project_name).")
1247
+
1248
+ if not all(isinstance(project, tuple) and len(project) == 2 for project in projects):
1249
+ logging.error("Each project must be a tuple of (scenario_name, project_name).")
1250
+ raise ValueError("Each project must be a tuple of (scenario_name, project_name).")
1251
+
1252
+ if not all(isinstance(scenario, (str, tuple)) and isinstance(project, str) for scenario, project in projects):
1253
+ logging.error("Both scenario and project names must be strings.")
1254
+ raise ValueError("Both scenario and project names must be strings.")
1255
+
1256
+ # check if project exists in the projects directory
1257
+ err = False
1258
+ for scenario, project in projects:
1259
+ project_path = self.directories[Directory_Analysis.PROJECTS] / project
1260
+ if not project_path.exists():
1261
+ logging.error(f"Project '{project}' does not exist in the projects directory.")
1262
+ err = True
1263
+ if err:
1264
+ logging.error("One or more projects do not exist in the projects directory.")
1265
+ raise FileNotFoundError("One or more projects do not exist in the projects directory.")
1266
+
1267
+ for scenario, project in projects:
1268
+ self._multiprojects.add(project, _sanitize_scenario_name(scenario), project_type.WOLF2D)
1269
+
1270
+ logging.info("MultiProjects instance created successfully.")
1271
+
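+     # Usage sketch (illustrative only): typical chaining once the project files exist in projets/
+     # and the scenario polygons have been registered. Names are placeholders.
+     #
+     #     analysis.add_projects([('reference', 'reference.proj'), ('scenario_A', 'scenario_A.proj')])
+     #     analysis.load_results_for_all(epsilon=0.001)
+     #     analysis.extract_data_from_polygons()
+     #     analysis.cache_data_to_disk()
+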
1272
+ @property
1273
+ def viewer(self) -> WolfMapViewer | None:
1274
+ """ Return the map viewer instance. """
1275
+ if self.mapviewer is None:
1276
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1277
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1278
+ return self.mapviewer
1279
+
1280
+ def autoscale(self) -> None:
1281
+ """ Autoscale the map viewer to fit the current bounds. """
1282
+ if self.mapviewer is None:
1283
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1284
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1285
+
1286
+ self.mapviewer.Autoscale()
1287
+
1288
+ @property
1289
+ def viewer_bounds(self, rounded:bool = True, decimal:int = 0) -> tuple:
1290
+ """ Return the current bounds of the map viewer.
1291
+
1292
+ :return: A tuple (xmin, ymin, xmax, ymax) with the current bounds. Accessed as a property, `rounded` and `decimal` always keep their default values (coordinates rounded to 0 decimals).
1293
+ """
1294
+ if self.mapviewer is None:
1295
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1296
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1297
+
1298
+ xmin, ymin, xmax, ymax = self.mapviewer.get_canvas_bounds()
1299
+ if rounded:
1300
+ xmin = round(xmin, decimal)
1301
+ ymin = round(ymin, decimal)
1302
+ xmax = round(xmax, decimal)
1303
+ ymax = round(ymax, decimal)
1304
+ return xmin, ymin, xmax, ymax
1305
+ else:
1306
+ return self.mapviewer.get_canvas_bounds()
1307
+
1308
+ def add_vector2viewer(self, vectorfile: str, id: str) -> None:
1309
+ """ Add a vector to the map viewer.
1310
+
1311
+ :param vectorfile: The filename of the vector file to be added.
1312
+ :param id: The id of the vector to be displayed in the map viewer.
1313
+ """
1314
+ if self.mapviewer is None:
1315
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1316
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1317
+
1318
+ if not isinstance(vectorfile, str):
1319
+ logging.error("Vector file must be a string representing the filename.")
1320
+ raise ValueError("Vector file must be a string representing the filename.")
1321
+
1322
+ if not isinstance(id, str):
1323
+ logging.error("Vector id must be a string.")
1324
+ raise ValueError("Vector id must be a string.")
1325
+
1326
+ # check if vectorfile exists
1327
+ vector_path = self.directories[Directory_Analysis.VECTORS] / vectorfile
1328
+ if not vector_path.exists():
1329
+ logging.error(f"Vector file '{vectorfile}' does not exist in the vectors directory.")
1330
+ raise FileNotFoundError(f"Vector file '{vectorfile}' does not exist in the vectors directory.")
1331
+
1332
+ # check if the id already exists
1333
+ ids = self.mapviewer.get_list_keys(drawing_type=draw_type.VECTORS, checked_state=None)
1334
+ if id in ids:
1335
+ logging.warning(f"Vector with id '{id}' already exists in the map viewer. Choose another id.")
1336
+ raise ValueError(f"Vector with id '{id}' already exists in the map viewer. Choose another id.")
1337
+
1338
+ self.mapviewer.add_object('vector', filename = str(self.directories[Directory_Analysis.VECTORS] / vectorfile), id=id)
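+ # Usage sketch (hypothetical file name and id; the file must be present in the vectors directory):
+ #   analysis.add_vector2viewer("lit_mineur.vecz", id="lit_mineur")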
1339
+
1340
+ def get_polygon(self, scenario: str | tuple[str,str] = None) -> Polygons_Analyze:
1341
+ """ Get the polygons for a specific scenario.
1342
+
1343
+ :param scenario: The name of the scenario to get polygons for. Must match one of the configured scenarios.
1344
+ :return: An instance of Polygons_Analyze.
1345
+ """
1346
+ scenario = _sanitize_scenario_name(scenario)
1347
+
1348
+ if scenario not in self._polygons:
1349
+ logging.error(f"Scenario '{scenario}' not found in the analysis.")
1350
+ raise ValueError(f"Scenario '{scenario}' not found in the analysis.")
1351
+
1352
+ return self._polygons[scenario][0]
1353
+
1354
+ def cache_xys(self, scenario: str | tuple[str,str] = None) -> None:
1355
+ """ Cache the x and y coordinates of the polygons for a specific scenario.
1356
+
1357
+ :param scenario: The name of the scenario to cache x and y coordinates for. If None, caches for all scenarios.
1358
+ """
1359
+ if scenario is None:
1360
+ # Cache xys for all scenarios
1361
+ for key in self._polygons.keys():
1362
+ self.cache_xys(key)
1363
+ logging.info("Cached x and y coordinates for all scenarios.")
1364
+ return
1365
+
1366
+ scenario = _sanitize_scenario_name(scenario)
1367
+
1368
+ if scenario not in self._polygons:
1369
+ logging.error(f"Scenario '{scenario}' not found in the analysis.")
1370
+ raise ValueError(f"Scenario '{scenario}' not found in the analysis.")
1371
+
1372
+ poly = self.get_polygon(scenario)
1373
+ poly.save_xy_s_tofile(self.directories[Directory_Analysis.CACHE] / f"{scenario}_xy_s.csv")
1374
+ logging.info(f"Cached x and y coordinates for scenario '{scenario}' to file f'{scenario}_xy_s.csv'.")
1375
+
1376
+ def get_polygons_informations(self, scenario: str | tuple[str,str] = None) -> dict:
1377
+ """ Get the information of the polygons for a specific scenario.
1378
+
1379
+ :param scenario: The name of the scenario to get polygons information for. If None, returns information for all scenarios.
1380
+ :return: A dictionary with the polygons information.
1381
+ """
1382
+ if scenario is None:
1383
+ # Return information for all scenarios
1384
+ return {key: self.get_polygons_informations(key) for key in self._polygons.keys()}
1385
+
1386
+ scenario = _sanitize_scenario_name(scenario)
1387
+
1388
+ if scenario not in self._polygons:
1389
+ logging.error(f"Scenario '{scenario}' not found in the analysis.")
1390
+ raise ValueError(f"Scenario '{scenario}' not found in the analysis.")
1391
+
1392
+ poly = self.get_polygon(scenario).myzones[-1]
1393
+ return {_("Number of polygons"): poly.nbvectors,
1394
+ _("Spacing") : f"{poly.myvectors[1].myvertices[0].z - poly.myvectors[0].myvertices[0].z:.3f} m",}
1395
+
1396
+ @property
1397
+ def polygons(self) -> list[tuple[str, str]]:
1398
+ """ Return the polygons used in the analysis as a list of (scenario_name, polygon_filename) tuples. """
1399
+ return [(key, poly[1]) for key, poly in self._polygons.items()]
1400
+
1401
+ @polygons.setter
1402
+ def polygons(self, polygons: list[tuple[str,str]]) -> None:
1403
+ """ Set the polygons for the analysis.
1404
+
1405
+ :param polygons: A list of tuples where each tuple contains the scenario name and the polygon name.
1406
+ """
1407
+
1408
+ if not isinstance(polygons, list):
1409
+ logging.error("Polygons must be a list.")
1410
+ raise ValueError("Polygons must be a list.")
1411
+
1412
+ if not all(isinstance(polygon, tuple) and len(polygon) == 2 for polygon in polygons):
1413
+ logging.error("Each polygon must be a tuple of (scenario_name, polygon_name).")
1414
+ raise ValueError("Each polygon must be a tuple of (scenario_name, polygon_name).")
1415
+
1416
+ # check if all vector files exist
1417
+ for polygon in polygons:
1418
+ scenario_name, polygon_name = polygon
1419
+ scenario_name = _sanitize_scenario_name(scenario_name)
1420
+
1421
+ if scenario_name not in self.scenarios_directories:
1422
+ logging.error(f"Scenario '{scenario_name}' not found in the analysis.")
1423
+ raise ValueError(f"Scenario '{scenario_name}' not found in the analysis.")
1424
+
1425
+ vector_path = self.directories[Directory_Analysis.VECTORS] / f"{polygon_name}"
1426
+ if not vector_path.exists():
1427
+ logging.error(f"Polygon vector file '{vector_path}' does not exist.")
1428
+ raise ValueError(f"Polygon vector file '{vector_path}' does not exist.")
1429
+
1430
+ self._polygons = {_sanitize_scenario_name(polygon[0]): (Polygons_Analyze(self.directories[Directory_Analysis.VECTORS] / polygon[1]), polygon[1]) for polygon in polygons}
1431
+ logging.info(f"Polygons set.")
1432
+
1433
+ def set_reference_riverbed(self, scenario: str | tuple) -> None:
1434
+ """ Set the reference riverbed for the analysis.
1435
+
1436
+ :param scenario: The name of the scenario whose riverbed is used as the reference.
1437
+ """
1438
+ scenario = _sanitize_scenario_name(scenario)
1439
+
1440
+ if scenario not in self._polygons.keys():
1441
+ logging.error(f"Scenario '{scenario}' not found in the analysis.")
1442
+ raise ValueError(f"Scenario '{scenario}' not found in the analysis.")
1443
+
1444
+ self._reference_polygon = self._polygons[scenario][0]
1445
+
1446
+ for key, polygons in self._polygons.items():
1447
+ if key != scenario:
1448
+ polygons[0].compute_distance(self._reference_polygon.riverbed.linestring)
1449
+ logging.info(f"Reference riverbed set for scenarios from '{scenario}'.")
1450
+
1451
+ @property
1452
+ def return_periods(self) -> list:
1453
+ """ Return the list of return periods for the analysis. """
1454
+ return self._return_periods
1455
+
1456
+ @return_periods.setter
1457
+ def return_periods(self, periods: list[str]) -> None:
1458
+ """ Set the return periods for the analysis.
1459
+
1460
+ :param periods: A list of return periods to set.
1461
+ """
1462
+ if not isinstance(periods, list):
1463
+ logging.error("Return periods must be a list.")
1464
+ raise ValueError("Return periods must be a list.")
1465
+
1466
+ if not all(isinstance(period, str) for period in periods):
1467
+ logging.error("All return periods must be string.")
1468
+ raise ValueError("All return periods must be string.")
1469
+
1470
+ self._return_periods = periods
1471
+ logging.info(f"Return periods set to: {self._return_periods}")
1472
+
1473
+ @property
1474
+ def backgrounds(self) -> list[str] | None:
1475
+ """ Return the names of the background images (orthophotos). """
1476
+ return self._background_images
1477
+
1478
+ @backgrounds.setter
1479
+ def backgrounds(self, name: str | list[str]) -> None:
1480
+ """ Set the orthophoto for the analysis.
1481
+
1482
+ :param name: The name of the orthophoto
1483
+ """
1484
+ # check if available in mapviewer
1485
+ if self.mapviewer is None:
1486
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1487
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1488
+
1489
+ backgrounds = self.mapviewer.get_list_keys(drawing_type=draw_type.WMSBACK, checked_state=None)
1490
+
1491
+ err = False
1492
+
1493
+ if isinstance(name, str):
1494
+ name = [name]
1495
+
1496
+ for background in name:
1497
+ if background not in backgrounds:
1498
+ logging.error(f"Background '{background}' not found.")
1499
+ err = True
1500
+
1501
+ if err:
1502
+ back = '\n'.join(backgrounds)
1503
+ logging.info(f"Available backgrounds:\n{back}")
1504
+ raise ValueError(f"Orthophoto '{name}' not found in the available backgrounds.")
1505
+
1506
+ self._background_images = name
1507
+ logging.info(f"Background images set to: {self._background_images}")
1508
+
1509
+ def list_backgrounds(self) -> list[str]:
1510
+ """ List the available backgrounds in the map viewer.
1511
+
1512
+ :return: A list of available background names.
1513
+ """
1514
+ if self.mapviewer is None:
1515
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1516
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1517
+
1518
+ backgrounds = self.mapviewer.get_list_keys(drawing_type=draw_type.WMSBACK, checked_state=None)
1519
+ return backgrounds
1520
+
1521
+ def list_simulations(self, scenario:str = None) -> list[str] | dict:
1522
+ """ List the available simulations in the analysis.
1523
+
1524
+ :param scenario: The name of the scenario to list simulations for. If None, lists all simulations.
1525
+ :return: A list of available simulation names, or a dictionary mapping each scenario to its simulations when no scenario is given.
1526
+ """
1527
+
1528
+ if scenario is None:
1529
+ return {key : list_directories(dir/'simulations') for key,dir in self.scenarios_directories.items()}
1530
+ else:
1531
+ if scenario not in self.scenarios_directories:
1532
+ logging.error(f"Scenario '{scenario}' not found in the analysis.")
1533
+ raise ValueError(f"Scenario '{scenario}' not found in the analysis.")
1534
+ return list_directories(self.scenarios_directories[scenario] / 'simulations')
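+ # Usage sketch (hypothetical scenario name):
+ #   all_sims = analysis.list_simulations()               # dict: scenario -> simulation names
+ #   ref_sims = analysis.list_simulations("sc_reference") # list of simulation names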
1535
+
1536
+ def check_directories(self) -> bool:
1537
+ """ Check if the analysis directories exist.
1538
+
1539
+ :return: True if all directories exist, False otherwise.
1540
+ """
1541
+ return check_analysis_directories(self.base_directory)
1542
+
1543
+ def add_scenarios(self, scenario_names: list[tuple[str, str]]) -> dict:
1544
+ """ Add scenarios to the analysis.
1545
+
1546
+ :param scenario_names: A list of (name, title) tuples describing the scenarios to add.
1547
+ :return: A dictionary with the paths of the added scenarios.
1548
+ """
1549
+
1550
+ # check scenario_names are tuples of (name, title)
1551
+ if not all(isinstance(scen, tuple) and len(scen) == 2 for scen in scenario_names):
1552
+ logging.error("Scenario names must be a list of tuple (name, title).")
1553
+ raise ValueError("Scenario names must be a list of tuple (name, title).")
1554
+
1555
+ # check all scenarios are different
1556
+ if len(scenario_names) != len(set(scen[0] for scen in scenario_names)):
1557
+ logging.error("Scenario names must be unique.")
1558
+ raise ValueError("Scenario names must be unique.")
1559
+
1560
+ # check all titles are different
1561
+ if len(scenario_names) != len(set(scen[1] for scen in scenario_names)):
1562
+ logging.error("Scenario titles must be unique.")
1563
+ raise ValueError("Scenario titles must be unique.")
1564
+
1565
+ if not check_if_scenarios_exist(self.storage_directory, [scen[0] for scen in scenario_names]):
1566
+ logging.error(f"You need to check your scenario names or your storage directory.")
1567
+ raise ValueError("One or more scenarios do not exist in the specified base directory.")
1568
+
1569
+ self.scenarios_directories = get_scenarios_directories(self.storage_directory, [scen[0] for scen in scenario_names])
1570
+ self.scenarios = scenario_names
1571
+
1572
+ # check if simulations exist for each scenario
1573
+ for scen in self.scenarios_directories.values():
1574
+ if (scen / 'simulations').exists():
1575
+ logging.info(f"Scenario '{scen}' has simulations available.")
1576
+ else:
1577
+ logging.warning(f"Scenario '{scen}' does not have simulations available. You may need to run simulations first.")
1578
+
1579
+ def get_image(self, bounds: list|tuple, ds:float = None) -> tuple[plt.Figure, plt.Axes]:
1580
+ """ Get a figure and axes for displaying the map with the specified bounds.
1581
+
1582
+ :param bounds: A list or a tuple with [xmin, ymin, xmax, ymax] defining the bounds to zoom in on.
1583
+ :param ds: The distance scale for the figure. If None, it is computed automatically from the bounds.
+ :return: A (fig, ax) tuple with the rendered Matplotlib Figure and Axes.
+ """
1584
+
1585
+ if self.mapviewer is None:
1586
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1587
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1588
+
1589
+ if not isinstance(bounds, (list, tuple)) or len(bounds) != 4:
1590
+ logging.error("Bounds must be a list or tuple with four elements: [xmin, ymin, xmax, ymax].")
1591
+ raise ValueError("Bounds must be a list or tuple with four elements: [xmin, ymin, xmax, ymax].")
1592
+
1593
+ xmin, ymin, xmax, ymax = bounds
1594
+ self.mapviewer.zoom_on({'xmin':xmin, 'xmax':xmax, 'ymin':ymin, 'ymax':ymax}, forceupdate=True)
1595
+ fig, ax = plt.subplots()
1596
+
1597
+ if ds is None:
1598
+
1599
+ ds = int(min(abs(xmax - xmin), abs(ymax - ymin)) / 5)
1600
+ if ds < 100:
1601
+ # rounded to 10
1602
+ ds = 10 * (ds // 10)
1603
+ elif ds < 500:
1604
+ # rounded to 100
1605
+ ds = 100 * (ds // 100)
1606
+ elif ds < 1000:
1607
+ # rounded to 500
1608
+ ds = 500 * (ds // 500)
1609
+ elif ds < 10000:
1610
+ # rounded to 1000
1611
+ ds = 1000 * (ds // 1000)
1612
+
1613
+ else:
1614
+ if not isinstance(ds, (int, float)):
1615
+ logging.error("ds must be a number.")
1616
+ raise ValueError("ds must be a number.")
1617
+
1618
+ if ds <= 0:
1619
+ logging.error("ds must be a positive number.")
1620
+ raise ValueError("ds must be a positive number.")
1621
+
1622
+ try:
1623
+ self.mapviewer.display_canvasogl(fig=fig, ax=ax, ds = ds)
1624
+ except Exception as e:
1625
+ logging.error(f"Error displaying the map: {e}")
1626
+ raise RuntimeError("Error displaying the map. Ensure that the MapViewer is properly initialized and the bounds are valid.")
1627
+
1628
+ return fig, ax
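+ # Usage sketch (hypothetical bounds in map coordinates):
+ #   fig, ax = analysis.get_image((252000., 135000., 253000., 136000.))
+ #   fig.savefig(analysis.directories[Directory_Analysis.IMAGES] / "zoom.png")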
1629
+
1630
+ def check_backgrounds(self):
1631
+ """ Check the orthophotos in the map viewer. """
1632
+ if self.mapviewer is None:
1633
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1634
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1635
+
1636
+ for back in self.backgrounds:
1637
+ self.mapviewer.check_id(back)
1638
+ self.mapviewer.update()
1639
+
1640
+ def uncheck_backgrounds(self):
1641
+ """ Uncheck the orthophotos in the map viewer. """
1642
+ if self.mapviewer is None:
1643
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1644
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1645
+
1646
+ for back in self.backgrounds:
1647
+ self.mapviewer.uncheck_id(back)
1648
+ self.mapviewer.update()
1649
+
1650
+ def create_report(self, title: str, author: str, report_name:str) -> RapidReport:
1651
+ """ Create a report for the analysis.
1652
+
1653
+ :param title: The title of the report.
1654
+ :param author: The author of the report.
1655
+ :param report_name: The file name of the report (saved with a .docx suffix in the reports directory).
+ :return: An instance of RapidReport.
1656
+ """
1657
+ self.report = create_a_report(title, author)
1658
+ self._report_name = Path(report_name).with_suffix('.docx')
1659
+ return self.report
1660
+
1661
+ def save_report(self, report_name: str = None) -> None:
1662
+ """ Save the report to the specified directory.
1663
+
1664
+ :param report_name: The name of the report file. If None, uses the default report name.
1665
+ """
1666
+ if self.report is None:
1667
+ logging.error("No report has been created yet.")
1668
+ raise ValueError("No report has been created yet.")
1669
+
1670
+ if report_name is None:
1671
+ report_name = self._report_name
1672
+
1673
+ report_path = self.directories[Directory_Analysis.REPORTS] / f"{Path(report_name).with_suffix('.docx')}"
1674
+
1675
+ if not self._report_saved_once:
1676
+ # Check if the report already exists
1677
+ if report_path.exists():
1678
+ logging.warning(f"Report {report_path} already exists. It will be overwritten.")
1679
+ else:
1680
+ logging.info(f"Creating new report at {report_path}.")
1681
+
1682
+ self._report_saved_once = True
1683
+
1684
+ self.report.save(report_path)
1685
+ logging.info(f"Report saved to {report_path}.")
1686
+
1687
+ def create_wolf_mapviewer(self) -> WolfMapViewer:
1688
+ """ Create a WolfMapViewer instance.
1689
+
1690
+ :return: An instance of WolfMapViewer.
1691
+ """
1692
+ self.mapviewer = create_a_wolf_viewer()
1693
+ return self.mapviewer
1694
+
1695
+ def set_current_scenario(self, scenario_name: str) -> None:
1696
+ """ Set the current scenario for the analysis.
1697
+
1698
+ :param scenario_name: The name of the scenario to set as current.
1699
+ """
1700
+ if scenario_name not in self.scenarios_directories:
1701
+ logging.error(f"Scenario '{scenario_name}' not found in the analysis.")
1702
+ raise ValueError(f"Scenario '{scenario_name}' not found in the analysis.")
1703
+
1704
+ self.current_scenario = self.scenarios_directories[scenario_name]
1705
+ logging.info(f"Current scenario set to {self.current_scenario}.")
1706
+
1707
+ def __getitem__(self, scenario_name: str) -> Path:
1708
+ """ Get the path of a specific scenario.
1709
+
1710
+ :param scenario_name: The name of the scenario to get.
1711
+ :return: The path to the scenario directory.
1712
+ """
1713
+ if scenario_name not in self.scenarios_directories:
1714
+ logging.error(f"Scenario '{scenario_name}' not found in the analysis.")
1715
+ raise KeyError(f"Scenario '{scenario_name}' not found in the analysis.")
1716
+ return self.scenarios_directories[scenario_name]
1717
+
1718
+ def get_scenario_names(self) -> list[str]:
1719
+ """ Get the names of the scenarios in the analysis.
1720
+
1721
+ :return: A list of scenario names.
1722
+ """
1723
+ return list(self.scenarios_directories.keys())
1724
+
1725
+ def get_scenarios_titles(self) -> list[str]:
1726
+ """ Get the titles of the scenarios in the analysis.
1727
+
1728
+ :return: A list of scenario titles.
1729
+ """
1730
+ return [scen[1] for scen in self.scenarios]
1731
+
1732
+ def report_introduction_auto(self):
1733
+ """ Automatically generate the introduction section of the report."""
1734
+
1735
+ if self.report is None:
1736
+ logging.error("No report has been created yet.")
1737
+ raise ValueError("No report has been created yet.")
1738
+
1739
+ if len(self.return_periods) == 0:
1740
+ logging.error("No return periods have been set for the analysis.")
1741
+ raise ValueError("No return periods have been set for the analysis.")
1742
+
1743
+ if len(self.scenarios) == 0:
1744
+ logging.error("No scenarios have been added to the analysis.")
1745
+ raise ValueError("No scenarios have been added to the analysis.")
1746
+
1747
+ report = self.report
1748
+
1749
+ report.add_title('Scénarios de modélisation')
1750
+ report.add_bullet_list(self.return_periods)
1751
+
1752
+ report.add_title("Méthodologie d'analyse")
1753
+ report += "L'analyse des simulations est réalisée en plusieurs étapes :"
1754
+ report.add_bullet_list(["Préparation des modélisations sur base du 'gestionnaire de scénarios'",
1755
+ "Calcul des modélisations via 'wolfgpu' pour les différentes périodes de retour",
1756
+ "Analyse des lignes d'eau pour chaque scénario",
1757
+ "Comparaison des lignes d'eau pour chaque période de retour",
1758
+ "Analyse des situations de débordement en 2D"])
1759
+
1760
+ report.add_title("Méthode de représentation des lignes d'eau")
1761
+ report.add_title("Approche générale",2)
1762
+ report += """Le lit mineur de la rivière a été délimité au moyen de polylignes (centre et 2 parallèles RG et RD).
1763
+ Ces polylignes ont été ensuite découpées en tronçon de 5 mètres pour former des polygones régulièrement répartis.
1764
+ Dans chaque polygone, les valeurs des inconnues de modélisations (hauteur d'eau et débits spécifiques selon X et Y) peuvent être extraites.
1765
+ Après avoir fait de même pour l'information topo-bathymétrique, il est possible de calculer l'altitude de surface libre de l'eau en chaque maille.
1766
+ Cela représente donc une série de valeurs en chaque polygone.
1767
+ La valeur médiane de cette série est ensuite exploitée et associée à la coordonnée curviligne du centre du polygone.
1768
+ """
1769
+ report.add_title("Variante de position du lit mineur dans les scénarios",2)
1770
+ report += """Si le lit mineur est déplacé dans un scénario, les polygones sont adaptés en fonction de la nouvelle position du lit mineur.
1771
+ Par contre, afin de procéder à une comparaison des lignes d'eau entre scénarios, il est nécessaire de choisir une référence.
1772
+ Cette référence servira à évaluer les coordonnées curvilignes de tous les polygones par projection géométrique au point le plus proche.
1773
+ La distorsion engendrée par cette approche est acceptable pour les scénarios envisagés pour Theux.
1774
+ L'attention est toutefois attirée sur le fait que cette approche pourrait ne pas être valable pour des déplacements nettement plus importants du lit mineur.
1775
+ """
1776
+
1777
+ self.save_report()
1778
+ logging.info("Introduction section of the report has been automatically generated.")
1779
+
1780
+ def report_add_figure_from_zoom(self, bounds: list|tuple, caption:str = None, ds:float = None) -> tuple[plt.Figure, plt.Axes]:
1781
+ """ Add a figure to the report from the current zoomed view of the map viewer.
1782
+
1783
+ :param bounds: A list or a tuple with [xmin, ymin, xmax, ymax] defining the bounds to zoom in on.
1784
+ :param caption: The caption for the figure. If None, no caption will be added.
1785
+ :param ds: The distance scale for the figure. If None, it will be calculated automatically.
1786
+ :return: The (fig, ax) pair of the figure added to the report.
+ """
1787
+ if self.report is None:
1788
+ logging.error("No report has been created yet.")
1789
+ raise ValueError("No report has been created yet.")
1790
+
1791
+ if ds is None:
1792
+ xmin, ymin, xmax, ymax = self.viewer_bounds
1793
+
1794
+ ds = int(min(abs(xmax - xmin), abs(ymax - ymin)) / 5)
1795
+ if ds < 100:
1796
+ # rounded to 10
1797
+ ds = 10 * (ds // 10)
1798
+ elif ds < 500:
1799
+ # rounded to 100
1800
+ ds = 100 * (ds // 100)
1801
+ elif ds < 1000:
1802
+ # rounded to 500
1803
+ ds = 500 * (ds // 500)
1804
+ elif ds < 10000:
1805
+ # rounded to 1000
1806
+ ds = 1000 * (ds // 1000)
1807
+
1808
+ else:
1809
+ if not isinstance(ds, (int, float)):
1810
+ logging.error("ds must be a number.")
1811
+ raise ValueError("ds must be a number.")
1812
+
1813
+ if ds <= 0:
1814
+ logging.error("ds must be a positive number.")
1815
+ raise ValueError("ds must be a positive number.")
1816
+
1817
+ fig, ax = self.get_image(bounds, ds)
1818
+ self.report.add_figure(fig, caption=caption)
1819
+ logging.info("Figure added to the report from the current zoomed view of the map viewer.")
1820
+
1821
+ return fig, ax
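+ # Usage sketch: render the current viewer extent and append it to the report
+ #   analysis.report_add_figure_from_zoom(analysis.viewer_bounds, caption="Overview of the study area")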
1822
+
1823
+ def load_modifications(self, ad2viewer:bool = True):
1824
+ """ Load modifications for scenarios from vecz files."""
1825
+
1826
+ MODIF = 'bath_assembly.vecz'
1827
+
1828
+ for key, directory in self.scenarios_directories.items():
1829
+ modif_path = directory / MODIF
1830
+
1831
+ if modif_path.exists():
1832
+ logging.info(f"Loading modifications from {modif_path}")
1833
+ self._modifications[key] = Zones(modif_path, mapviewer=self.mapviewer)
1834
+
1835
+ self._modifications[key].myzones[0].myvectors[0].unuse()
1836
+
1837
+ if ad2viewer:
1838
+ if self.mapviewer is None:
1839
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1840
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1841
+
1842
+ self.mapviewer.add_object('vector', newobj= self._modifications[key], id=f'modif_{key}', ToCheck= True)
1843
+ logging.info(f"Modifications for scenario {key} added to the map viewer.")
1844
+ else:
1845
+ logging.warning(f"No modifications found for scenario {key} at {modif_path}.")
1846
+ self._modifications[key] = None
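+ # Usage sketch: load every scenario's bath_assembly.vecz and display it in the viewer
+ #   analysis.load_modifications(ad2viewer=True)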
1847
+
1848
+ def uncheck_modifications(self, scenario:str | tuple) -> None:
1849
+ """ Uncheck the modifications for a specific scenario in the map viewer.
1850
+
1851
+ :param scenario: The name of the scenario to uncheck modifications for.
1852
+ """
1853
+ if self.mapviewer is None:
1854
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1855
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1856
+
1857
+ if isinstance(scenario, tuple):
1858
+ scenario = scenario[0].strip()
1859
+
1860
+ if scenario not in self._modifications:
1861
+ logging.error(f"Scenario '{scenario}' not found in the modifications.")
1862
+ raise KeyError(f"Scenario '{scenario}' not found in the modifications.")
1863
+
1864
+ if self._modifications[scenario] is None:
1865
+ logging.error(f"No modifications loaded for scenario '{scenario}'.")
1866
+ return
1867
+
1868
+ self.mapviewer.uncheck_id(f'modif_{scenario}')
1869
+
1870
+ def uncheck_all_modifications(self) -> None:
1871
+ """ Uncheck all modifications in the map viewer."""
1872
+ if self.mapviewer is None:
1873
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1874
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1875
+
1876
+ for scenario in self._modifications.keys():
1877
+ self.uncheck_modifications(scenario)
1878
+ logging.info("All modifications unchecked in the map viewer.")
1879
+
1880
+ def check_modifications(self, scenario:str | tuple) -> bool:
1881
+ """ Check if modifications have been loaded for all scenarios.
1882
+
1883
+ :return: True if modifications are loaded for all scenarios, False otherwise.
1884
+ """
1885
+
1886
+ if self.mapviewer is None:
1887
+ logging.error("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1888
+ raise ValueError("MapViewer is not initialized. Please create a WolfMapViewer instance first.")
1889
+
1890
+ if isinstance(scenario, tuple):
1891
+ scenario = scenario[0].strip()
1892
+
1893
+ if scenario not in self._modifications:
1894
+ logging.error(f"Scenario '{scenario}' not found in the modifications.")
1895
+ raise KeyError(f"Scenario '{scenario}' not found in the modifications.")
1896
+
1897
+ if self._modifications[scenario] is None:
1898
+ logging.error(f"No modifications loaded for scenario '{scenario}'.")
1899
+ return False
1900
+
1901
+ self.mapviewer.check_id(f'modif_{scenario}')
+ return True
1902
+