ras-commander 0.77.0__tar.gz → 0.79.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ras_commander-0.77.0/ras_commander.egg-info → ras_commander-0.79.0}/PKG-INFO +2 -3
- {ras_commander-0.77.0 → ras_commander-0.79.0}/README.md +1 -2
- ras_commander-0.79.0/ras_commander/HdfFluvialPluvial.py +416 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfResultsMesh.py +0 -16
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasExamples.py +121 -2
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasGeo.py +2 -2
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasMap.py +467 -252
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasPrj.py +2 -1
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/__init__.py +1 -1
- {ras_commander-0.77.0 → ras_commander-0.79.0/ras_commander.egg-info}/PKG-INFO +2 -3
- {ras_commander-0.77.0 → ras_commander-0.79.0}/setup.py +1 -1
- ras_commander-0.77.0/ras_commander/HdfFluvialPluvial.py +0 -554
- {ras_commander-0.77.0 → ras_commander-0.79.0}/LICENSE +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/pyproject.toml +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/Decorators.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfBase.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfBndry.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfInfiltration.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfMesh.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfPipe.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfPlan.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfPlot.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfPump.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfResultsPlan.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfResultsPlot.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfResultsXsec.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfStruc.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfUtils.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfXsec.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/LoggingConfig.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasCmdr.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasPlan.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasUnsteady.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasUtils.py +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander.egg-info/SOURCES.txt +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander.egg-info/dependency_links.txt +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander.egg-info/requires.txt +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander.egg-info/top_level.txt +0 -0
- {ras_commander-0.77.0 → ras_commander-0.79.0}/setup.cfg +0 -0
{ras_commander-0.77.0/ras_commander.egg-info → ras_commander-0.79.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ras-commander
-Version: 0.77.0
+Version: 0.79.0
 Summary: A Python library for automating HEC-RAS 6.x operations
 Home-page: https://github.com/gpt-cmdr/ras-commander
 Author: William M. Katzenmeyer, P.E., C.F.M.
@@ -291,7 +291,7 @@ This is useful for comparing different river systems, running scenario analyses
 - `RasGeo`: Handles operations related to geometry files
 - `RasUnsteady`: Manages unsteady flow file operations
 - `RasUtils`: Contains utility functions for file operations and data management
-- `RasMap`: Parses and
+- `RasMap`: Parses RASMapper configuration files and automates floodplain mapping
 - `RasExamples`: Manages and loads HEC-RAS example projects
 
 #### HDF Data Access Classes
@@ -511,7 +511,6 @@ Additionally, we would like to acknowledge the following notable contributions a
 
 - Development of additional HDF functions for detailed analysis and mapping of HEC-RAS results within the RasHdf class.
 - Development of the prototype `RasCmdr` class for executing HEC-RAS simulations.
-- Optimization examples and methods from (INSERT REFERENCE) for use in the Ras-Commander library examples
 
 2. Attribution: The [`pyHMT2D`](https://github.com/psu-efd/pyHMT2D/) project by Xiaofeng Liu, which provided insights into HDF file handling methods for HEC-RAS outputs. Many of the functions in the [Ras_2D_Data.py](https://github.com/psu-efd/pyHMT2D/blob/main/pyHMT2D/Hydraulic_Models_Data/RAS_2D/RAS_2D_Data.py) file were adapted with AI for use in RAS Commander.
 
{ras_commander-0.77.0 → ras_commander-0.79.0}/README.md

@@ -258,7 +258,7 @@ This is useful for comparing different river systems, running scenario analyses
 - `RasGeo`: Handles operations related to geometry files
 - `RasUnsteady`: Manages unsteady flow file operations
 - `RasUtils`: Contains utility functions for file operations and data management
-- `RasMap`: Parses and
+- `RasMap`: Parses RASMapper configuration files and automates floodplain mapping
 - `RasExamples`: Manages and loads HEC-RAS example projects
 
 #### HDF Data Access Classes
@@ -478,7 +478,6 @@ Additionally, we would like to acknowledge the following notable contributions a
 
 - Development of additional HDF functions for detailed analysis and mapping of HEC-RAS results within the RasHdf class.
 - Development of the prototype `RasCmdr` class for executing HEC-RAS simulations.
-- Optimization examples and methods from (INSERT REFERENCE) for use in the Ras-Commander library examples
 
 2. Attribution: The [`pyHMT2D`](https://github.com/psu-efd/pyHMT2D/) project by Xiaofeng Liu, which provided insights into HDF file handling methods for HEC-RAS outputs. Many of the functions in the [Ras_2D_Data.py](https://github.com/psu-efd/pyHMT2D/blob/main/pyHMT2D/Hydraulic_Models_Data/RAS_2D/RAS_2D_Data.py) file were adapted with AI for use in RAS Commander.
 
ras_commander-0.79.0/ras_commander/HdfFluvialPluvial.py

@@ -0,0 +1,416 @@
+"""
+Class: HdfFluvialPluvial
+
+All of the methods in this class are static and are designed to be used without instantiation.
+
+List of Functions in HdfFluvialPluvial:
+- calculate_fluvial_pluvial_boundary(): Returns LineStrings representing the boundary.
+- generate_fluvial_pluvial_polygons(): Returns dissolved Polygons for fluvial, pluvial, and ambiguous zones.
+- _process_cell_adjacencies()
+- _get_boundary_cell_pairs()
+- _identify_boundary_edges()
+
+"""
+
+from typing import Dict, List, Tuple, Set, Optional
+import pandas as pd
+import geopandas as gpd
+from collections import defaultdict
+from shapely.geometry import LineString, MultiLineString
+from tqdm import tqdm
+from .HdfMesh import HdfMesh
+from .HdfUtils import HdfUtils
+from .Decorators import standardize_input
+from .HdfResultsMesh import HdfResultsMesh
+from .LoggingConfig import get_logger
+from pathlib import Path
+
+logger = get_logger(__name__)
+
+class HdfFluvialPluvial:
+    """
+    A class for analyzing and visualizing fluvial-pluvial boundaries in HEC-RAS 2D model results.
+
+    This class provides methods to process and visualize HEC-RAS 2D model outputs,
+    specifically focusing on the delineation of fluvial and pluvial flood areas.
+    It includes functionality for calculating fluvial-pluvial boundaries based on
+    the timing of maximum water surface elevations.
+
+    Key Concepts:
+    - Fluvial flooding: Flooding from rivers/streams
+    - Pluvial flooding: Flooding from rainfall/surface water
+    - delta_t: Time threshold (in hours) used to distinguish between fluvial and pluvial cells.
+      Cells with max WSE time differences greater than delta_t are considered boundaries.
+
+    Data Requirements:
+    - HEC-RAS plan HDF file containing:
+        - 2D mesh cell geometry (accessed via HdfMesh)
+        - Maximum water surface elevation times (accessed via HdfResultsMesh)
+
+    Usage Example:
+        >>> from ras_commander import HdfFluvialPluvial
+        >>> hdf_path = Path("path/to/plan.hdf")
+
+        # To get just the boundary lines
+        >>> boundary_lines_gdf = HdfFluvialPluvial.calculate_fluvial_pluvial_boundary(
+        ...     hdf_path,
+        ...     delta_t=12
+        ... )
+
+        # To get classified flood polygons
+        >>> flood_polygons_gdf = HdfFluvialPluvial.generate_fluvial_pluvial_polygons(
+        ...     hdf_path,
+        ...     delta_t=12,
+        ...     temporal_tolerance_hours=1.0
+        ... )
+    """
+    def __init__(self):
+        self.logger = get_logger(__name__)  # Initialize logger with module name
+
+    @staticmethod
+    @standardize_input(file_type='plan_hdf')
+    def calculate_fluvial_pluvial_boundary(
+        hdf_path: Path,
+        delta_t: float = 12,
+        min_line_length: Optional[float] = None
+    ) -> gpd.GeoDataFrame:
+        """
+        Calculate the fluvial-pluvial boundary lines based on cell polygons and maximum water surface elevation times.
+
+        This function is useful for visualizing the line of transition between flooding mechanisms.
+
+        Args:
+            hdf_path (Path): Path to the HEC-RAS plan HDF file.
+            delta_t (float): Threshold time difference in hours. Cells with time differences
+                greater than this value are considered boundaries. Default is 12 hours.
+            min_line_length (float, optional): Minimum length (in CRS units) for boundary lines to be included.
+                Lines shorter than this will be dropped. Default is None (no filtering).
+
+        Returns:
+            gpd.GeoDataFrame: GeoDataFrame containing the fluvial-pluvial boundary lines.
+        """
+        try:
+            logger.info("Getting cell polygons from HDF file...")
+            cell_polygons_gdf = HdfMesh.get_mesh_cell_polygons(hdf_path)
+            if cell_polygons_gdf.empty:
+                raise ValueError("No cell polygons found in HDF file")
+
+            logger.info("Getting maximum water surface data from HDF file...")
+            max_ws_df = HdfResultsMesh.get_mesh_max_ws(hdf_path)
+            if max_ws_df.empty:
+                raise ValueError("No maximum water surface data found in HDF file")
+
+            logger.info("Converting maximum water surface timestamps...")
+            max_ws_df['maximum_water_surface_time'] = max_ws_df['maximum_water_surface_time'].apply(
+                lambda x: HdfUtils.parse_ras_datetime(x) if isinstance(x, str) else x
+            )
+
+            logger.info("Processing cell adjacencies...")
+            cell_adjacency, common_edges = HdfFluvialPluvial._process_cell_adjacencies(cell_polygons_gdf)
+
+            logger.info("Extracting cell times from maximum water surface data...")
+            cell_times = max_ws_df.set_index('cell_id')['maximum_water_surface_time'].to_dict()
+
+            logger.info("Identifying boundary edges...")
+            boundary_edges = HdfFluvialPluvial._identify_boundary_edges(
+                cell_adjacency, common_edges, cell_times, delta_t, min_line_length=min_line_length
+            )
+
+            logger.info("Creating final GeoDataFrame for boundaries...")
+            boundary_gdf = gpd.GeoDataFrame(
+                geometry=boundary_edges,
+                crs=cell_polygons_gdf.crs
+            )
+
+            logger.info("Boundary line calculation completed successfully.")
+            return boundary_gdf
+
+        except Exception as e:
+            logger.error(f"Error calculating fluvial-pluvial boundary lines: {str(e)}")
+            return gpd.GeoDataFrame()
+
+    @staticmethod
+    @standardize_input(file_type='plan_hdf')
+    def generate_fluvial_pluvial_polygons(
+        hdf_path: Path,
+        delta_t: float = 12,
+        temporal_tolerance_hours: float = 1.0,
+        min_polygon_area_acres: Optional[float] = None
+    ) -> gpd.GeoDataFrame:
+        """
+        Generates dissolved polygons representing fluvial, pluvial, and ambiguous flood zones.
+
+        This function classifies each wetted cell and merges them into three distinct regions
+        based on the timing of maximum water surface elevation.
+
+        Optionally, for polygons classified as fluvial or pluvial, if their area is less than
+        min_polygon_area_acres, they are reclassified to the opposite type and merged with
+        adjacent polygons of that type. Ambiguous polygons are exempt from this logic.
+
+        Args:
+            hdf_path (Path): Path to the HEC-RAS plan HDF file.
+            delta_t (float): The time difference (in hours) between adjacent cells that defines
+                the initial boundary between fluvial and pluvial zones. Default is 12.
+            temporal_tolerance_hours (float): The maximum time difference (in hours) for a cell
+                to be considered part of an expanding region. Default is 1.0.
+            min_polygon_area_acres (float, optional): Minimum polygon area (in acres). For fluvial or pluvial
+                polygons smaller than this, reclassify to the opposite type and merge with
+                adjacent polygons of that type. Ambiguous polygons are not affected.
+
+        Returns:
+            gpd.GeoDataFrame: A GeoDataFrame with dissolved polygons for 'fluvial', 'pluvial',
+                and 'ambiguous' zones.
+        """
+        try:
+            # --- 1. Data Loading and Preparation ---
+            logger.info("Loading mesh and results data...")
+            cell_polygons_gdf = HdfMesh.get_mesh_cell_polygons(hdf_path)
+            max_ws_df = HdfResultsMesh.get_mesh_max_ws(hdf_path)
+            max_ws_df['maximum_water_surface_time'] = max_ws_df['maximum_water_surface_time'].apply(
+                lambda x: HdfUtils.parse_ras_datetime(x) if isinstance(x, str) else x
+            )
+            cell_times = max_ws_df.set_index('cell_id')['maximum_water_surface_time'].to_dict()
+
+            logger.info("Processing cell adjacencies...")
+            cell_adjacency, _ = HdfFluvialPluvial._process_cell_adjacencies(cell_polygons_gdf)
+
+            # --- 2. Seeding the Classifications ---
+            logger.info(f"Identifying initial boundary seeds with delta_t = {delta_t} hours...")
+            boundary_pairs = HdfFluvialPluvial._get_boundary_cell_pairs(cell_adjacency, cell_times, delta_t)
+
+            classifications = pd.Series('unclassified', index=cell_polygons_gdf['cell_id'], name='classification')
+
+            for cell1, cell2 in boundary_pairs:
+                if cell_times.get(cell1) > cell_times.get(cell2):
+                    classifications.loc[cell1] = 'fluvial'
+                    classifications.loc[cell2] = 'pluvial'
+                else:
+                    classifications.loc[cell1] = 'pluvial'
+                    classifications.loc[cell2] = 'fluvial'
+
+            # --- 3. Iterative Region Growth ---
+            logger.info(f"Starting iterative region growth with tolerance = {temporal_tolerance_hours} hours...")
+            fluvial_frontier = set(classifications[classifications == 'fluvial'].index)
+            pluvial_frontier = set(classifications[classifications == 'pluvial'].index)
+
+            iteration = 0
+            with tqdm(desc="Region Growing", unit="iter") as pbar:
+                while fluvial_frontier or pluvial_frontier:
+                    iteration += 1
+
+                    next_fluvial_candidates = set()
+                    for cell_id in fluvial_frontier:
+                        for neighbor_id in cell_adjacency.get(cell_id, []):
+                            if classifications.loc[neighbor_id] == 'unclassified' and pd.notna(cell_times.get(neighbor_id)):
+                                time_diff_seconds = abs((cell_times[cell_id] - cell_times[neighbor_id]).total_seconds())
+                                if time_diff_seconds <= temporal_tolerance_hours * 3600:
+                                    next_fluvial_candidates.add(neighbor_id)
+
+                    next_pluvial_candidates = set()
+                    for cell_id in pluvial_frontier:
+                        for neighbor_id in cell_adjacency.get(cell_id, []):
+                            if classifications.loc[neighbor_id] == 'unclassified' and pd.notna(cell_times.get(neighbor_id)):
+                                time_diff_seconds = abs((cell_times[cell_id] - cell_times[neighbor_id]).total_seconds())
+                                if time_diff_seconds <= temporal_tolerance_hours * 3600:
+                                    next_pluvial_candidates.add(neighbor_id)
+
+                    # Resolve conflicts
+                    ambiguous_cells = next_fluvial_candidates.intersection(next_pluvial_candidates)
+                    if ambiguous_cells:
+                        classifications.loc[list(ambiguous_cells)] = 'ambiguous'
+
+                    # Classify non-conflicted cells
+                    newly_fluvial = next_fluvial_candidates - ambiguous_cells
+                    if newly_fluvial:
+                        classifications.loc[list(newly_fluvial)] = 'fluvial'
+
+                    newly_pluvial = next_pluvial_candidates - ambiguous_cells
+                    if newly_pluvial:
+                        classifications.loc[list(newly_pluvial)] = 'pluvial'
+
+                    # Update frontiers for the next iteration
+                    fluvial_frontier = newly_fluvial
+                    pluvial_frontier = newly_pluvial
+
+                    pbar.update(1)
+                    pbar.set_postfix({
+                        "Fluvial": len(fluvial_frontier),
+                        "Pluvial": len(pluvial_frontier),
+                        "Ambiguous": len(ambiguous_cells)
+                    })
+
+            logger.info(f"Region growing completed in {iteration} iterations.")
+
+            # --- 4. Finalization and Dissolving ---
+            # Classify any remaining unclassified (likely isolated) cells as ambiguous
+            classifications[classifications == 'unclassified'] = 'ambiguous'
+
+            logger.info("Merging classifications with cell polygons...")
+            classified_gdf = cell_polygons_gdf.merge(classifications.to_frame(), left_on='cell_id', right_index=True)
+
+            logger.info("Dissolving polygons by classification...")
+            final_regions_gdf = classified_gdf.dissolve(by='classification', aggfunc='first').reset_index()
+
+            # --- 5. Minimum Polygon Area Filtering and Merging (if requested) ---
+            if min_polygon_area_acres is not None:
+                logger.info(f"Applying minimum polygon area filter: {min_polygon_area_acres} acres")
+                # Calculate area in acres (1 acre = 4046.8564224 m^2)
+                # If CRS is not projected, warn and skip area filtering
+                if not final_regions_gdf.crs or not final_regions_gdf.crs.is_projected:
+                    logger.warning("CRS is not projected. Area-based filtering skipped.")
+                else:
+                    # Explode to individual polygons for area filtering
+                    exploded = final_regions_gdf.explode(index_parts=False, ignore_index=True)
+                    exploded['area_acres'] = exploded.geometry.area / 4046.8564224
+
+                    # Only consider fluvial and pluvial polygons for area filtering
+                    mask_fluvial = (exploded['classification'] == 'fluvial') & (exploded['area_acres'] < min_polygon_area_acres)
+                    mask_pluvial = (exploded['classification'] == 'pluvial') & (exploded['area_acres'] < min_polygon_area_acres)
+
+                    n_fluvial = mask_fluvial.sum()
+                    n_pluvial = mask_pluvial.sum()
+                    logger.info(f"Found {n_fluvial} small fluvial and {n_pluvial} small pluvial polygons to reclassify.")
+
+                    # Reclassify small fluvial polygons as pluvial, and small pluvial polygons as fluvial
+                    exploded.loc[mask_fluvial, 'classification'] = 'pluvial'
+                    exploded.loc[mask_pluvial, 'classification'] = 'fluvial'
+                    # Ambiguous polygons are not changed
+
+                    # Redissolve by classification to merge with adjacent polygons of the same type
+                    final_regions_gdf = exploded.dissolve(by='classification', aggfunc='first').reset_index()
+                    logger.info("Redissolved polygons after reclassification of small areas.")
+
+            logger.info("Polygon generation completed successfully.")
+            return final_regions_gdf
+
+        except Exception as e:
+            logger.error(f"Error generating fluvial-pluvial polygons: {str(e)}", exc_info=True)
+            return gpd.GeoDataFrame()
+
+
+    @staticmethod
+    def _process_cell_adjacencies(cell_polygons_gdf: gpd.GeoDataFrame) -> Tuple[Dict[int, List[int]], Dict[int, Dict[int, LineString]]]:
+        """
+        Optimized method to process cell adjacencies by extracting shared edges directly.
+        """
+        cell_adjacency = defaultdict(list)
+        common_edges = defaultdict(dict)
+        edge_to_cells = defaultdict(set)
+
+        def edge_key(coords1, coords2, precision=8):
+            coords1 = tuple(round(coord, precision) for coord in coords1)
+            coords2 = tuple(round(coord, precision) for coord in coords2)
+            return tuple(sorted([coords1, coords2]))
+
+        for _, row in cell_polygons_gdf.iterrows():
+            cell_id = row['cell_id']
+            geom = row['geometry']
+            if geom.is_empty or not geom.is_valid:
+                continue
+            coords = list(geom.exterior.coords)
+            for i in range(len(coords) - 1):
+                key = edge_key(coords[i], coords[i + 1])
+                edge_to_cells[key].add(cell_id)
+
+        for edge, cells in edge_to_cells.items():
+            cell_list = list(cells)
+            if len(cell_list) >= 2:
+                for i in range(len(cell_list)):
+                    for j in range(i + 1, len(cell_list)):
+                        cell1, cell2 = cell_list[i], cell_list[j]
+                        cell_adjacency[cell1].append(cell2)
+                        cell_adjacency[cell2].append(cell1)
+                        common_edge = LineString([edge[0], edge[1]])
+                        common_edges[cell1][cell2] = common_edge
+                        common_edges[cell2][cell1] = common_edge
+
+        return cell_adjacency, common_edges
+
+    @staticmethod
+    def _get_boundary_cell_pairs(
+        cell_adjacency: Dict[int, List[int]],
+        cell_times: Dict[int, pd.Timestamp],
+        delta_t: float
+    ) -> List[Tuple[int, int]]:
+        """
+        Identifies pairs of adjacent cell IDs that form a boundary.
+
+        A boundary is defined where the difference in max water surface time
+        between two adjacent cells is greater than delta_t.
+
+        Args:
+            cell_adjacency (Dict[int, List[int]]): Dictionary of cell adjacencies.
+            cell_times (Dict[int, pd.Timestamp]): Dictionary mapping cell IDs to their max WSE times.
+            delta_t (float): Time threshold in hours.
+
+        Returns:
+            List[Tuple[int, int]]: A list of tuples, where each tuple contains a pair of
+                cell IDs forming a boundary.
+        """
+        boundary_cell_pairs = []
+        processed_pairs = set()
+        delta_t_seconds = delta_t * 3600
+
+        for cell_id, neighbors in cell_adjacency.items():
+            time1 = cell_times.get(cell_id)
+            if not pd.notna(time1):
+                continue
+
+            for neighbor_id in neighbors:
+                pair = tuple(sorted((cell_id, neighbor_id)))
+                if pair in processed_pairs:
+                    continue
+
+                time2 = cell_times.get(neighbor_id)
+                if not pd.notna(time2):
+                    continue
+
+                time_diff = abs((time1 - time2).total_seconds())
+
+                if time_diff >= delta_t_seconds:
+                    boundary_cell_pairs.append(pair)
+
+                processed_pairs.add(pair)
+
+        return boundary_cell_pairs
+
+    @staticmethod
+    def _identify_boundary_edges(
+        cell_adjacency: Dict[int, List[int]],
+        common_edges: Dict[int, Dict[int, LineString]],
+        cell_times: Dict[int, pd.Timestamp],
+        delta_t: float,
+        min_line_length: Optional[float] = None
+    ) -> List[LineString]:
+        """
+        Identify boundary edges between cells with significant time differences.
+
+        This function now uses the helper `_get_boundary_cell_pairs`.
+
+        Args:
+            cell_adjacency (Dict[int, List[int]]): Dictionary of cell adjacencies.
+            common_edges (Dict[int, Dict[int, LineString]]): Dictionary of shared edges between cells.
+            cell_times (Dict[int, pd.Timestamp]): Dictionary mapping cell IDs to their max WSE times.
+            delta_t (float): Time threshold in hours.
+            min_line_length (float, optional): Minimum length (in CRS units) for boundary lines to be included.
+                Lines shorter than this will be dropped. Default is None (no filtering).
+
+        Returns:
+            List[LineString]: List of LineString geometries representing boundaries.
+        """
+        boundary_pairs = HdfFluvialPluvial._get_boundary_cell_pairs(cell_adjacency, cell_times, delta_t)
+
+        boundary_edges = [common_edges[c1][c2] for c1, c2 in boundary_pairs]
+
+        logger.info(f"Identified {len(boundary_edges)} boundary edges using delta_t of {delta_t} hours.")
+
+        if min_line_length is not None:
+            filtered_edges = [edge for edge in boundary_edges if edge.length >= min_line_length]
+            num_dropped = len(boundary_edges) - len(filtered_edges)
+            if num_dropped > 0:
+                logger.info(f"{num_dropped} boundary line(s) shorter than {min_line_length} units were dropped after filtering.")
+            boundary_edges = filtered_edges
+
+        return boundary_edges
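The workhorse of the new module is `_process_cell_adjacencies`, which avoids pairwise polygon comparisons by hashing every cell edge under an order-independent, rounded key: two cells that trace the same edge (in opposite directions, with floating-point noise) collide on the same dictionary entry. A minimal standalone sketch of that technique, using two hypothetical unit-square cells in place of a real HEC-RAS mesh:

```python
from collections import defaultdict
from shapely.geometry import Polygon

# Two hypothetical mesh cells sharing the edge x=1 (toy stand-ins for 2D cells).
cells = {
    0: Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
    1: Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]),
}

def edge_key(p1, p2, precision=8):
    # Round to absorb float noise, then sort so (a, b) and (b, a) collide.
    p1 = tuple(round(c, precision) for c in p1)
    p2 = tuple(round(c, precision) for c in p2)
    return tuple(sorted([p1, p2]))

edge_to_cells = defaultdict(set)
for cell_id, geom in cells.items():
    coords = list(geom.exterior.coords)  # closed ring: last point repeats the first
    for a, b in zip(coords, coords[1:]):
        edge_to_cells[edge_key(a, b)].add(cell_id)

# Edges referenced by two or more cells are shared faces, i.e. adjacencies.
adjacency = {edge: ids for edge, ids in edge_to_cells.items() if len(ids) >= 2}
print(adjacency)  # {((1.0, 0.0), (1.0, 1.0)): {0, 1}}
```

Rounding to 8 decimal places matches the `precision` default in the hunk above; without it, coordinates that differ only in the last few bits would hash to distinct keys and the shared edge would be missed.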
{ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/HdfResultsMesh.py

@@ -42,22 +42,6 @@ HdfUtils for common operations. Methods use @log_call decorator for logging and
 
 
 
-REVISIONS MADE:
-
-Use get_ prefix for functions that return data.
-BUT, we will never set results data, so we should use get_ for results data.
-
-Renamed functions:
-- mesh_summary_output() to get_mesh_summary()
-- mesh_timeseries_output() to get_mesh_timeseries()
-- mesh_faces_timeseries_output() to get_mesh_faces_timeseries()
-- mesh_cells_timeseries_output() to get_mesh_cells_timeseries()
-- mesh_last_iter() to get_mesh_last_iter()
-- mesh_max_ws() to get_mesh_max_ws()
-
-
-
-
 
 
 
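The deleted block was a leftover revision log; the `get_`-prefixed names it lists are the API that actually shipped. A usage sketch of the renamed getter that `HdfFluvialPluvial` relies on (the plan path is a placeholder, and the top-level export is assumed to mirror `HdfFluvialPluvial`'s):

```python
from pathlib import Path
from ras_commander import HdfResultsMesh

hdf_path = Path("path/to/plan.hdf")  # placeholder plan HDF

# Formerly mesh_max_ws(); returns one row per cell with the max WSE and its timestamp.
max_ws_df = HdfResultsMesh.get_mesh_max_ws(hdf_path)
print(max_ws_df[['cell_id', 'maximum_water_surface_time']].head())
```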
{ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasExamples.py

@@ -69,6 +69,12 @@ class RasExamples:
     examples_dir = base_dir
     projects_dir = examples_dir / 'example_projects'
     csv_file_path = examples_dir / 'example_projects.csv'
+
+    # Special projects that are not in the main zip file
+    SPECIAL_PROJECTS = {
+        'NewOrleansMetro': 'https://www.hec.usace.army.mil/confluence/rasdocs/hgt/files/latest/299502039/299502111/1/1747692522764/NewOrleansMetroPipesExample.zip',
+        'BeaverLake': 'https://www.hec.usace.army.mil/confluence/rasdocs/hgt/files/latest/299501780/299502090/1/1747692179014/BeaverLake-SWMM-Import-Solution.zip'
+    }
 
     _folder_df = None
     _zip_file_path = None
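Because these two projects live outside the main example-projects zip, requesting them routes through a separate download path (the dispatch added in the next hunk). A hedged usage sketch, assuming `extract_project` is the public entry point that supplies `project_names` to the loop shown below:

```python
from ras_commander import RasExamples

# 'NewOrleansMetro' triggers the special-project download; the method name
# extract_project is assumed, since the hunks here show only its internals.
paths = RasExamples.extract_project(["NewOrleansMetro"])
print(paths[0])  # e.g. .../example_projects/NewOrleansMetro
```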
@@ -148,6 +154,17 @@ class RasExamples:
         extracted_paths = []
 
         for project_name in project_names:
+            # Check if this is a special project
+            if project_name in cls.SPECIAL_PROJECTS:
+                try:
+                    special_path = cls._extract_special_project(project_name)
+                    extracted_paths.append(special_path)
+                    continue
+                except Exception as e:
+                    logger.error(f"Failed to extract special project '{project_name}': {e}")
+                    continue
+
+            # Regular project extraction logic
             logger.info("----- RasExamples Extracting Project -----")
             logger.info(f"Extracting project '{project_name}'")
             project_path = cls.projects_dir
@@ -319,6 +336,9 @@ class RasExamples:
     def list_projects(cls, category=None):
         """
         List all projects or projects in a specific category.
+
+        Note: Special projects (NewOrleansMetro, BeaverLake) are also available but not listed
+        in categories as they are downloaded separately.
         """
         if cls._folder_df is None:
             logger.warning("No projects available. Make sure the zip file is properly loaded.")
@@ -328,7 +348,10 @@ class RasExamples:
             logger.info(f"Projects in category '{category}': {', '.join(projects)}")
         else:
             projects = cls._folder_df['Project'].unique()
-
+            # Add special projects to the list
+            all_projects = list(projects) + list(cls.SPECIAL_PROJECTS.keys())
+            logger.info(f"All available projects: {', '.join(all_projects)}")
+            return all_projects
         return projects.tolist()
 
     @classmethod
@@ -421,4 +444,100 @@ class RasExamples:
             raise ValueError(f"Invalid size string: {size_str}")
 
         number, unit = float(re.findall(r'[\d\.]+', size_str)[0]), re.findall(r'[BKMGT]B?', size_str)[0]
-        return int(number * units[unit])
+        return int(number * units[unit])
+
+    @classmethod
+    def _extract_special_project(cls, project_name: str) -> Path:
+        """
+        Download and extract special projects that are not in the main zip file.
+
+        Args:
+            project_name: Name of the special project ('NewOrleansMetro' or 'BeaverLake')
+
+        Returns:
+            Path: Path to the extracted project directory
+
+        Raises:
+            ValueError: If the project is not a recognized special project
+        """
+        if project_name not in cls.SPECIAL_PROJECTS:
+            raise ValueError(f"'{project_name}' is not a recognized special project")
+
+        logger.info(f"----- RasExamples Extracting Special Project -----")
+        logger.info(f"Extracting special project '{project_name}'")
+
+        # Create the project directory
+        project_path = cls.projects_dir / project_name
+
+        # Check if already exists
+        if project_path.exists():
+            logger.info(f"Special project '{project_name}' already exists. Deleting existing folder...")
+            try:
+                shutil.rmtree(project_path)
+                logger.info(f"Existing folder for project '{project_name}' has been deleted.")
+            except Exception as e:
+                logger.error(f"Failed to delete existing project folder '{project_name}': {e}")
+                raise
+
+        # Create the project directory
+        project_path.mkdir(parents=True, exist_ok=True)
+
+        # Download the zip file
+        url = cls.SPECIAL_PROJECTS[project_name]
+        zip_file_path = cls.projects_dir / f"{project_name}_temp.zip"
+
+        logger.info(f"Downloading special project from: {url}")
+        logger.info("This may take a few moments...")
+
+        try:
+            response = requests.get(url, stream=True, timeout=300)
+            response.raise_for_status()
+
+            # Get total file size if available
+            total_size = int(response.headers.get('content-length', 0))
+
+            # Download with progress bar
+            with open(zip_file_path, 'wb') as file:
+                if total_size > 0:
+                    with tqdm(
+                        desc=f"Downloading {project_name}",
+                        total=total_size,
+                        unit='iB',
+                        unit_scale=True,
+                        unit_divisor=1024,
+                    ) as progress_bar:
+                        for chunk in response.iter_content(chunk_size=8192):
+                            size = file.write(chunk)
+                            progress_bar.update(size)
+                else:
+                    # No content length, download without progress bar
+                    for chunk in response.iter_content(chunk_size=8192):
+                        file.write(chunk)
+
+            logger.info(f"Downloaded special project zip file to {zip_file_path}")
+
+        except requests.exceptions.RequestException as e:
+            logger.error(f"Failed to download special project '{project_name}': {e}")
+            if zip_file_path.exists():
+                zip_file_path.unlink()
+            raise
+
+        # Extract the zip file directly to the project directory
+        try:
+            with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
+                # Extract directly to the project directory (no internal folder structure)
+                zip_ref.extractall(project_path)
+            logger.info(f"Successfully extracted special project '{project_name}' to {project_path}")
+
+        except Exception as e:
+            logger.error(f"Failed to extract special project '{project_name}': {e}")
+            if project_path.exists():
+                shutil.rmtree(project_path)
+            raise
+        finally:
+            # Clean up the temporary zip file
+            if zip_file_path.exists():
+                zip_file_path.unlink()
+                logger.debug(f"Removed temporary zip file: {zip_file_path}")
+
+        return project_path
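The download section of `_extract_special_project` is the standard requests streaming pattern: `stream=True` defers the body, `iter_content` yields fixed-size chunks, and the byte count returned by `file.write` drives the tqdm bar. The same pattern in isolation (URL and destination are placeholders):

```python
import requests
from tqdm import tqdm

def download_with_progress(url: str, dest: str, chunk_size: int = 8192) -> None:
    """Stream a file to disk, with a byte-accurate progress bar when possible."""
    response = requests.get(url, stream=True, timeout=300)
    response.raise_for_status()
    total_size = int(response.headers.get('content-length', 0))  # 0 if the server omits it
    with open(dest, 'wb') as file:
        if total_size > 0:
            with tqdm(total=total_size, unit='iB', unit_scale=True, unit_divisor=1024) as bar:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    bar.update(file.write(chunk))  # write() returns bytes written
        else:
            # No Content-Length header: download without a progress bar.
            for chunk in response.iter_content(chunk_size=chunk_size):
                file.write(chunk)

# Placeholder URL; any large file works.
# download_with_progress("https://example.com/archive.zip", "archive.zip")
```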
{ras_commander-0.77.0 → ras_commander-0.79.0}/ras_commander/RasGeo.py

@@ -206,10 +206,10 @@ class RasGeo:
 
         # Create DataFrame
         if base_table_rows:
-            df = pd.DataFrame(base_table_rows, columns=['Table Number', 'Land Cover Name', 'Base
+            df = pd.DataFrame(base_table_rows, columns=['Table Number', 'Land Cover Name', 'Base Mannings n Value'])
             return df
         else:
-            return pd.DataFrame(columns=['Table Number', 'Land Cover Name', 'Base
+            return pd.DataFrame(columns=['Table Number', 'Land Cover Name', 'Base Mannings n Value'])
 
 
     @staticmethod
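This fix completes a column name that was truncated in 0.77.0. For reference, the corrected schema produces a frame like the following (hypothetical rows; the real ones are parsed from a HEC-RAS geometry file):

```python
import pandas as pd

# Hypothetical land-cover rows; RasGeo reads these from a geometry file.
base_table_rows = [
    (1, 'Mixed Forest', 0.12),
    (2, 'Open Water', 0.035),
]
df = pd.DataFrame(base_table_rows, columns=['Table Number', 'Land Cover Name', 'Base Mannings n Value'])
print(df)
```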
|