wolfhece 2.2.29__py3-none-any.whl → 2.2.31__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
wolfhece/apps/version.py CHANGED
@@ -5,7 +5,7 @@ class WolfVersion():
 
         self.major = 2
         self.minor = 2
-        self.patch = 29
+        self.patch = 31
 
     def __str__(self):
 
@@ -0,0 +1,182 @@
+ """
+ Author: HECE - University of Liege, Pierre Archambeau
+ Date: 2025
+
+ Copyright (c) 2025 University of Liege. All rights reserved.
+
+ This script and its content are protected by copyright law. Unauthorized
+ copying or distribution of this file, via any medium, is strictly prohibited.
+ """
+
+ """ This module provides a downloader for the WOLFHECE dataset and other files freely available on the web. """
+
+ import re
+ import requests
+ import ftplib
+ from pathlib import Path
+ from enum import Enum
+ from typing import Union, Optional, List
+ from urllib.parse import urlparse  # standard URL parser, used for FTP downloads (see download_file)
+ from collections import namedtuple
+ import logging
+
+ class DownloadType(Enum):
+     """ Enum to define the type of download. """
+     HTTP = 'http'
+     HTTPS = 'https'
+     FTP = 'ftp'
+
+ class DownloadFiles(Enum):
+     """ Enum to define the files to download. """
+     WOLFARRAYS = ('bin', 'bin.txt')
+     TIFARRAYS = ('tif',)
+     TIFFARRAYS = ('tiff',)
+     SHPFILES = ('shp', 'dbf', 'shx', 'prj', 'cpg', 'sbn', 'sbx')
+     GPKGFILES = ('gpkg',)
+     VECFILES = ('vec', 'vec.extra')
+     VECZFILES = ('vecz', 'vecz.extra')
+     PROJECTFILES = ('proj',)
+
+ class DownloadDirectories(Enum):
+     """ Enum to define the directories for downloads. """
+     GDBFILES = ('gdb',)
+
+
+ GITLAB_EXAMPLE = 'https://gitlab.uliege.be/HECE/wolf_examples/-/raw/main'
+ DATADIR = Path(__file__).parent / 'data' / 'downloads'
+
+ def clean_url(url: str) -> str:
+     """ Clean the URL by removing any query parameters or fragments.
+
+     :param url: The URL to clean.
+     :type url: str
+     :return: The cleaned URL.
+     :rtype: str
+     """
+     # Remove query parameters and fragments
+     cleaned_url = re.sub(r'\?.*|#.*', '', url)
+     # Remove trailing slashes
+     cleaned_url = re.sub(r'/+$', '', cleaned_url)
+     # Remove any leading or trailing whitespace
+     cleaned_url = cleaned_url.strip()
+     # Convert backslashes to forward slashes (before collapsing duplicates)
+     cleaned_url = cleaned_url.replace('\\', '/')
+     # Collapse duplicate slashes, except right after the scheme
+     cleaned_url = re.sub(r'(?<!:)//+', '/', cleaned_url)
+     # Ensure the URL starts with http://, https:// or ftp://
+     if not cleaned_url.startswith(('http://', 'https://', 'ftp://')):
+         raise ValueError(f"Invalid URL: {url}. Must start with http://, https://, or ftp://")
+     return cleaned_url
+
+ def download_file(url: str, destination: Union[str, Path] = None, download_type: DownloadType = DownloadType.HTTP, load_from_cache: bool = True) -> Path:
+     """ Download a file from the specified URL to the destination path.
+
+     :param url: The URL of the file to download.
+     :param destination: The path where the file will be saved.
+     :param download_type: The type of download (HTTP, HTTPS, FTP).
+     :param load_from_cache: If True, skip the download when the destination file already exists.
+     :type url: str
+     :type destination: Union[str, Path]
+     :type download_type: DownloadType
+     :type load_from_cache: bool
+     :return: The path to the first downloaded file.
+     :rtype: Path
+     :raises ValueError: If the URL scheme, file extension, or download type is not supported.
+     """
+
+     url = str(url).strip()
+     # Clean the URL
+     url = clean_url(url)
+
+     if destination is None:
+         try:
+             destination = DATADIR / Path(url).parent.name / Path(url).name
+         except Exception:
+             destination = DATADIR / Path(url).name
+
+     # ensure a Path object and create the directory if it does not exist
+     destination = Path(destination)
+     destination.parent.mkdir(parents=True, exist_ok=True)
+
+     suffix = Path(url).suffix.lower()
+     # remove the dot from the suffix for matching
+     if suffix.startswith('.'):
+         suffix = suffix[1:]
+
+     # Find the file group based on the URL suffix
+     file_type = None
+     for file_type_enum in DownloadFiles:
+         if suffix in file_type_enum.value:
+             file_type = file_type_enum
+             break
+     if file_type is None:
+         raise ValueError(f"Unsupported file extension: '{suffix}' in URL {url}")
+
+     # Create the lists of files to download and of their destinations
+     # by replacing the suffix in the URL with each associated extension
+     to_download = []
+     to_destination = []
+     for ext in file_type.value:
+         if ext.startswith('.'):
+             ext = ext[1:]
+         # replace only the trailing suffix, not every occurrence in the URL
+         to_download.append(url[:-len(suffix)] + ext)
+         to_destination.append(destination.with_suffix(f'.{ext}'))
+
+     if download_type in (DownloadType.HTTP, DownloadType.HTTPS):
+
+         for cur_url, cur_dest in zip(to_download, to_destination):
+
+             if load_from_cache and cur_dest.exists():
+                 logging.info(f"File {cur_dest} already exists. Skipping download.")
+                 continue
+
+             if not cur_url.startswith(('http://', 'https://')):
+                 raise ValueError(f"Invalid URL: {cur_url}. Must start with http:// or https://")
+
+             try:
+                 response = requests.get(cur_url)
+                 response.raise_for_status()
+                 with open(cur_dest, 'wb') as file:
+                     file.write(response.content)
+             except requests.HTTPError as e:
+                 logging.error(f"HTTP error occurred while downloading {cur_url}: {e}")
+
+     elif download_type == DownloadType.FTP:
+
+         for cur_url, cur_dest in zip(to_download, to_destination):
+
+             if load_from_cache and cur_dest.exists():
+                 logging.info(f"File {cur_dest} already exists. Skipping download.")
+                 continue
+
+             if not cur_url.startswith('ftp://'):
+                 raise ValueError(f"Invalid URL: {cur_url}. Must start with ftp://")
+
+             try:
+                 # ftplib provides no URL parser (ftplib.parse_ftp_url does not exist);
+                 # use urllib.parse.urlparse instead
+                 parsed_url = urlparse(cur_url)
+                 with ftplib.FTP(parsed_url.hostname) as ftp:
+                     ftp.login()
+                     with open(cur_dest, 'wb') as file:
+                         ftp.retrbinary(f'RETR {parsed_url.path}', file.write)
+             except ftplib.all_errors as e:
+                 logging.error(f"FTP error occurred while downloading {cur_url}: {e}")
+     else:
+         raise ValueError(f"Unsupported download type: {download_type}")
+
+     return to_destination[0]
+
+ def toys_dataset(dir: str, file: str, load_from_cache: bool = True):
+     """ Download toy files from the WOLFHECE dataset.
+
+     :param dir: The directory where the file will be saved.
+     :param file: The name of the file to download.
+     :param load_from_cache: If True, reuse a previously downloaded copy.
+     :type dir: str
+     :type file: str
+     :type load_from_cache: bool
+     :return: The path to the downloaded file.
+     """
+     url = f"{GITLAB_EXAMPLE}/{dir}/{file}"
+     destination = DATADIR / dir / file
+     return download_file(url, destination, load_from_cache=load_from_cache)
+
+ if __name__ == "__main__":
+     # Example usage
+     print(download_file(r'https:\\gitlab.uliege.be\HECE\wolf_examples\-\raw\main\Extract_part_array\extraction.vec'))
+     print(download_file('https://gitlab.uliege.be/HECE/wolf_examples/-/raw/main/Extract_part_array/extraction.vec'))
+     print(download_file('https://gitlab.uliege.be/HECE/wolf_examples/-/raw/main/Extract_part_array/Array_vector.proj'))
+     print(download_file('https://gitlab.uliege.be/HECE/wolf_examples/-/raw/main/Array_Theux_Pepinster/mnt.bin'))
+     print(download_file('https://gitlab.uliege.be/HECE/wolf_examples/-/raw/main/Array_Theux_Pepinster/mnt.tif'))
+     print(download_file('https://gitlab.uliege.be/HECE/wolf_examples/-/raw/main/PICC/PICC_Vesdre.shp'))
+     print(toys_dataset('Extract_part_array', 'extraction.vec'))
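The __main__ block above exercises the downloader end to end. A hedged sketch of the caching behaviour follows; the import path is hypothetical, since the diff does not name the added file:

    # Hypothetical import path -- the hunk header omits the new file's name
    from wolfhece.download import toys_dataset

    path = toys_dataset('Array_Theux_Pepinster', 'mnt.bin')
    # The first call fetches mnt.bin and its companion mnt.bin.txt (the
    # WOLFARRAYS pair) into <package>/data/downloads/Array_Theux_Pepinster/;
    # with load_from_cache=True (the default), later calls return the cached
    # path without hitting the network again.
    print(path)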
@@ -25,7 +25,7 @@ from .drawing_obj import Element_To_Draw
  from .PyTranslate import _
  from .wolfresults_2D import views_2D, Wolfresults_2D
  from .Results2DGPU import wolfres2DGPU
- from .pybridges import stored_values_pos,stored_values_unk, parts_values, operators, stored_values_coords
+ from .pybridges import stored_values_pos, stored_values_unk, parts_values, operators, stored_values_coords
 
  from zipfile import ZIP_DEFLATED, ZipFile
 
@@ -48,8 +48,8 @@ class Extracting_Zones(Zones):
      def __init__(self, filename='', ox: float = 0, oy: float = 0, tx: float = 0, ty: float = 0, parent=None, is2D=True, idx: str = '', plotted: bool = True, mapviewer=None, need_for_wx: bool = False) -> None:
          super().__init__(filename, ox, oy, tx, ty, parent, is2D, idx, plotted, mapviewer, need_for_wx)
 
-         self.parts:dict = None
-         self.linked:Union[dict, list] = None
+         self.parts:dict = None                # Store the values inside the polygons - dict[dict] or dict[list]
+         self.linked:Union[dict, list] = None  # Object from which the values are extracted - dict or list
 
      def cache_data(self, outputfile:str):
          """
@@ -163,14 +163,11 @@ class Extracting_Zones(Zones):
              locdict[cursim] = np.array([np.array([ tuple(lst1), np.array(lst2, dtype= np.int32)], dtype=object ) for lst1, lst2 in curnparray], dtype=object)
 
 
-     def find_values_inside_parts(self, linked_arrays):
+     def find_values_inside_parts(self, linked_arrays: dict | list):
          """
-         Retrieve the values inside the zone
+         Get values inside the polygons defined in the zones.
 
-         Returns:
-         - a dictionary whose key is the name (or index) of the polygon in the zone --> central, upstream or downstream parts
-         - each entry is a dictionary whose 'values' key contains a dictionary for each array of the project
-         - each element of this sub-dictionary is a tuple containing all the useful values
+         :param linked_arrays: list or dict of arrays/simulations to link with the polygons.
 
          ***
          WARNING: if linked_arrays is a dict, an extra level based on the dict's keys is added; in that case, self.linked is a dict, not a list
@@ -179,6 +176,7 @@ class Extracting_Zones(Zones):
          """
          if isinstance(linked_arrays, dict):
 
+             self.linked = {}
              for curkey, curgroup in linked_arrays.items():
                  self.linked[curkey] = [(curlink.idx, type(curlink)) for curlink in curgroup]
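Behavioural note on the last two hunks: self.linked mirrors the container type of linked_arrays, and the added self.linked = {} initialisation is what makes the dict branch work. A minimal hedged sketch (res_low and res_high are hypothetical Wolfresults_2D / wolfres2DGPU instances, per the imports fixed above):

    zones = Extracting_Zones('extraction.vec')  # hypothetical vector file

    # list input: self.linked becomes a flat list of (idx, type) pairs
    zones.find_values_inside_parts([res_low, res_high])

    # dict input: an extra level keyed on the dict's keys, so self.linked
    # becomes {'scenario_a': [(idx, type)], 'scenario_b': [(idx, type)]}
    zones.find_values_inside_parts({'scenario_a': [res_low],
                                    'scenario_b': [res_high]})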