ras-commander 0.44.0__py3-none-any.whl → 0.46.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ras_commander/HdfFluvialPluvial.py +317 -0
- ras_commander/HdfMesh.py +62 -15
- ras_commander/HdfPipe.py +771 -0
- ras_commander/HdfPlan.py +5 -0
- ras_commander/HdfPump.py +269 -0
- ras_commander/HdfResultsMesh.py +135 -62
- ras_commander/HdfResultsPlan.py +3 -0
- ras_commander/HdfResultsXsec.py +192 -157
- ras_commander/HdfStruc.py +148 -50
- ras_commander/HdfUtils.py +51 -0
- ras_commander/HdfXsec.py +467 -136
- ras_commander/RasPlan.py +298 -45
- ras_commander/RasToGo.py +21 -0
- ras_commander/RasUnsteady.py +615 -14
- ras_commander/__init__.py +7 -1
- {ras_commander-0.44.0.dist-info → ras_commander-0.46.0.dist-info}/METADATA +1 -1
- ras_commander-0.46.0.dist-info/RECORD +30 -0
- {ras_commander-0.44.0.dist-info → ras_commander-0.46.0.dist-info}/WHEEL +1 -1
- ras_commander-0.44.0.dist-info/RECORD +0 -26
- {ras_commander-0.44.0.dist-info → ras_commander-0.46.0.dist-info}/LICENSE +0 -0
- {ras_commander-0.44.0.dist-info → ras_commander-0.46.0.dist-info}/top_level.txt +0 -0
ras_commander/HdfPipe.py
ADDED
@@ -0,0 +1,771 @@
import h5py
import numpy as np
import pandas as pd
import geopandas as gpd
import xarray as xr
from pathlib import Path
from shapely.geometry import LineString, Point, MultiLineString, Polygon, MultiPolygon
from typing import List, Dict, Any, Optional, Union, Tuple
from .HdfBase import HdfBase
from .HdfUtils import HdfUtils
from .Decorators import standardize_input, log_call
from .LoggingConfig import get_logger
from .HdfResultsMesh import HdfResultsMesh
import logging

logger = get_logger(__name__)

class HdfPipe:
    """
    A class for handling pipe network related data from HEC-RAS HDF files.
    """

    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def get_pipe_conduits(hdf_path: Path, crs: Optional[str] = "EPSG:4326") -> gpd.GeoDataFrame:
        """
        Combines HDF5 datasets from /Geometry/Pipe Conduits/ into a single GeoDataFrame.

        Parameters:
        - hdf_path: Path to the HDF5 file.
        - crs: Coordinate Reference System for the GeoDataFrame. Default is "EPSG:4326".

        Returns:
        - A GeoDataFrame with attributes, Polyline geometries, and Terrain Profiles as separate columns.
        """
        with h5py.File(hdf_path, 'r') as f:
            group = f['/Geometry/Pipe Conduits/']

            # --- Read and Process Attributes ---
            attributes = group['Attributes'][:]
            attr_df = pd.DataFrame(attributes)

            # Decode byte string fields to UTF-8 strings
            string_columns = attr_df.select_dtypes([object]).columns
            for col in string_columns:
                attr_df[col] = attr_df[col].apply(lambda x: x.decode('utf-8') if isinstance(x, bytes) else x)

            # --- Read Polyline Data ---
            polyline_info = group['Polyline Info'][:]      # columns: point_start_idx, point_count, part_start_idx, part_count
            polyline_points = group['Polyline Points'][:]  # columns: x, y coordinates

            polyline_geometries = []
            for info in polyline_info:
                point_start_idx = info[0]
                point_count = info[1]

                # Extract coordinates for this polyline directly using start index and count
                coords = polyline_points[point_start_idx:point_start_idx + point_count]

                if len(coords) < 2:
                    polyline_geometries.append(None)
                else:
                    polyline_geometries.append(LineString(coords))

            # --- Read Terrain Profiles Data ---
            terrain_info = group['Terrain Profiles Info'][:]
            terrain_values = group['Terrain Profiles Values'][:]

            # Create a list of (Station, Elevation) tuples for Terrain Profiles
            terrain_coords = list(zip(terrain_values[:, 0], terrain_values[:, 1]))

            terrain_profiles_list: List[List[Tuple[float, float]]] = []

            for i in range(len(terrain_info)):
                info = terrain_info[i]
                start_idx = info[0]
                count = info[1]

                # Extract (Station, Elevation) pairs for this conduit
                segment = terrain_coords[start_idx : start_idx + count]

                terrain_profiles_list.append(segment)  # Store the list of (Station, Elevation) tuples

            # --- Combine Data into GeoDataFrame ---
            attr_df['Polyline'] = polyline_geometries
            attr_df['Terrain_Profiles'] = terrain_profiles_list

            # Initialize GeoDataFrame with Polyline geometries
            gdf = gpd.GeoDataFrame(attr_df, geometry='Polyline', crs=crs)

            return gdf

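    # Hedged usage sketch (not part of the released file): load the conduits
    # and unpack one terrain profile into station/elevation sequences. The HDF
    # path and EPSG code are hypothetical.
    #
    #   conduits = HdfPipe.get_pipe_conduits(Path("project.p01.hdf"), crs="EPSG:2227")
    #   stations, elevations = zip(*conduits.loc[0, "Terrain_Profiles"])
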
    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def get_pipe_nodes(hdf_path: Path) -> gpd.GeoDataFrame:
        """
        Creates a GeoDataFrame for Pipe Node points and their attributes from an HDF5 file.

        Parameters:
        - hdf_path: Path to the HDF5 file.

        Returns:
        - A GeoDataFrame containing pipe node attributes and their geometries.
        """
        with h5py.File(hdf_path, 'r') as f:
            group = f['/Geometry/Pipe Nodes/']

            # --- Read and Process Attributes ---
            attributes = group['Attributes'][:]
            attr_df = pd.DataFrame(attributes)

            # Decode byte string fields to UTF-8 strings
            string_columns = attr_df.select_dtypes([object]).columns
            for col in string_columns:
                attr_df[col] = attr_df[col].apply(lambda x: x.decode('utf-8') if isinstance(x, bytes) else x)

            # --- Read Points Data ---
            points = group['Points'][:]
            # Create Shapely Point geometries
            geometries = [Point(xy) for xy in points]

            # --- Combine Attributes and Geometries into GeoDataFrame ---
            # Note: unlike get_pipe_conduits, no CRS is assigned here
            gdf = gpd.GeoDataFrame(attr_df, geometry=geometries)

            return gdf

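    # Hedged usage sketch: the nodes come back as a point GeoDataFrame without
    # a CRS, so one can be assigned afterwards. The path and EPSG code are
    # hypothetical.
    #
    #   nodes = HdfPipe.get_pipe_nodes(Path("project.p01.hdf"))
    #   nodes = nodes.set_crs("EPSG:2227")
    #   nodes.plot(markersize=4)
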
    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def get_pipe_network(hdf_path: Path, pipe_network_name: Optional[str] = None, crs: Optional[str] = "EPSG:4326") -> gpd.GeoDataFrame:
        """
        Creates a GeoDataFrame for a specified pipe network from an HDF5 file.

        Parameters:
        - hdf_path: Path to the HDF5 file.
        - pipe_network_name: Name of the pipe network to extract. If None, the first network is used.
        - crs: Coordinate Reference System for the GeoDataFrame. Default is "EPSG:4326".

        Returns:
        - A GeoDataFrame containing cell polygons, face polylines, node points, and their associated attributes.
        """
        with h5py.File(hdf_path, 'r') as f:
            pipe_networks_group = f['/Geometry/Pipe Networks/']

            # --- Determine Pipe Network to Use ---
            attributes = pipe_networks_group['Attributes'][:]
            attr_df = pd.DataFrame(attributes)

            # Decode 'Name' from byte strings to UTF-8
            attr_df['Name'] = attr_df['Name'].apply(lambda x: x.decode('utf-8') if isinstance(x, bytes) else x)

            if pipe_network_name:
                if pipe_network_name not in attr_df['Name'].values:
                    raise ValueError(f"Pipe network '{pipe_network_name}' not found in the HDF5 file.")
                network_idx = attr_df.index[attr_df['Name'] == pipe_network_name][0]
            else:
                network_idx = 0  # Default to first network

            # Get the name of the selected pipe network
            selected_network_name = attr_df.at[network_idx, 'Name']
            logging.info(f"Selected Pipe Network: {selected_network_name}")

            # Access the selected pipe network group
            network_group_path = f"/Geometry/Pipe Networks/{selected_network_name}/"
            network_group = f[network_group_path]

            # --- Helper Functions ---
            def decode_bytes(df: pd.DataFrame) -> pd.DataFrame:
                """Decode byte string columns to UTF-8."""
                string_columns = df.select_dtypes([object]).columns
                for col in string_columns:
                    df[col] = df[col].apply(lambda x: x.decode('utf-8') if isinstance(x, bytes) else x)
                return df

            def build_polygons(info, parts, points) -> List[Optional[Union[Polygon, MultiPolygon]]]:
                """Build Shapely Polygon or MultiPolygon geometries from HDF5 datasets."""
                poly_coords = list(zip(points[:, 0], points[:, 1]))
                geometries = []
                for i in range(len(info)):
                    cell_info = info[i]
                    point_start_idx = cell_info[0]
                    point_count = cell_info[1]
                    part_start_idx = cell_info[2]
                    part_count = cell_info[3]

                    parts_list = []
                    for p in range(part_start_idx, part_start_idx + part_count):
                        if p >= len(parts):
                            continue  # Prevent index out of range
                        part_info = parts[p]
                        part_point_start = part_info[0]
                        part_point_count = part_info[1]

                        coords = poly_coords[part_point_start : part_point_start + part_point_count]
                        if len(coords) < 3:
                            continue  # Not a valid polygon part
                        parts_list.append(coords)

                    if not parts_list:
                        geometries.append(None)
                    elif len(parts_list) == 1:
                        try:
                            geometries.append(Polygon(parts_list[0]))
                        except ValueError:
                            geometries.append(None)
                    else:
                        try:
                            geometries.append(MultiPolygon([Polygon(p) for p in parts_list if len(p) >= 3]))
                        except ValueError:
                            geometries.append(None)
                return geometries

            def build_multilinestring(info, parts, points) -> List[Optional[Union[LineString, MultiLineString]]]:
                """Build Shapely LineString or MultiLineString geometries from HDF5 datasets."""
                line_coords = list(zip(points[:, 0], points[:, 1]))
                geometries = []
                for i in range(len(info)):
                    face_info = info[i]
                    point_start_idx = face_info[0]
                    point_count = face_info[1]
                    part_start_idx = face_info[2]
                    part_count = face_info[3]

                    parts_list = []
                    for p in range(part_start_idx, part_start_idx + part_count):
                        if p >= len(parts):
                            continue  # Prevent index out of range
                        part_info = parts[p]
                        part_point_start = part_info[0]
                        part_point_count = part_info[1]

                        coords = line_coords[part_point_start : part_point_start + part_point_count]
                        if len(coords) < 2:
                            continue  # Cannot form LineString with fewer than 2 points
                        parts_list.append(coords)

                    if not parts_list:
                        geometries.append(None)
                    elif len(parts_list) == 1:
                        geometries.append(LineString(parts_list[0]))
                    else:
                        geometries.append(MultiLineString(parts_list))
                return geometries

            # --- Read and Process Cell Polygons ---
            cell_polygons_info = network_group['Cell Polygons Info'][:]
            cell_polygons_parts = network_group['Cell Polygons Parts'][:]
            cell_polygons_points = network_group['Cell Polygons Points'][:]

            cell_polygons_geometries = build_polygons(cell_polygons_info, cell_polygons_parts, cell_polygons_points)

            # --- Read and Process Face Polylines ---
            face_polylines_info = network_group['Face Polylines Info'][:]
            face_polylines_parts = network_group['Face Polylines Parts'][:]
            face_polylines_points = network_group['Face Polylines Points'][:]

            face_polylines_geometries = build_multilinestring(face_polylines_info, face_polylines_parts, face_polylines_points)

            # --- Read and Process Node Points ---
            # The pipe network group stores node IDs and connectivity, but no
            # coordinates: 'Node Surface Connectivity' rows hold only
            # ['Node ID', 'Layer', 'Layer ID', 'Sublayer ID']. Point geometries
            # would have to come from elsewhere (e.g. /Geometry/Pipe Nodes/Points),
            # so node geometries are left as None placeholders here.
            node_indices = network_group['Node Indices'][:]
            node_geometries = [None] * len(node_indices)  # Placeholder

            # --- Read and Process Cell Property Table ---
            cell_property_table = network_group['Cell Property Table'][:]
            cell_property_df = pd.DataFrame(cell_property_table)

            # Decode byte strings if any
            cell_property_df = decode_bytes(cell_property_df)

            # --- Read and Process Cells DS Face Indices ---
            cells_ds_face_info = network_group['Cells DS Face Indices Info'][:]
            cells_ds_face_values = network_group['Cells DS Face Indices Values'][:]

            # Create lists of DS Face Indices per cell
            cells_ds_face_indices = []
            for i in range(len(cells_ds_face_info)):
                info = cells_ds_face_info[i]
                start_idx, count = info
                indices = cells_ds_face_values[start_idx : start_idx + count]
                cells_ds_face_indices.append(indices.tolist())

            # --- Read and Process Cells Face Indices ---
            cells_face_info = network_group['Cells Face Indices Info'][:]
            cells_face_values = network_group['Cells Face Indices Values'][:]

            # Create lists of Face Indices per cell
            cells_face_indices = []
            for i in range(len(cells_face_info)):
                info = cells_face_info[i]
                start_idx, count = info
                indices = cells_face_values[start_idx : start_idx + count]
                cells_face_indices.append(indices.tolist())

            # --- Read and Process Cells Minimum Elevations ---
            cells_min_elevations = network_group['Cells Minimum Elevations'][:]
            cells_min_elevations_df = pd.DataFrame(cells_min_elevations, columns=['Minimum_Elevation'])

            # --- Read and Process Cells Node and Conduit IDs ---
            cells_node_conduit_ids = network_group['Cells Node and Conduit IDs'][:]
            cells_node_conduit_df = pd.DataFrame(cells_node_conduit_ids, columns=['Node_ID', 'Conduit_ID'])

            # --- Read and Process Cells US Face Indices ---
            cells_us_face_info = network_group['Cells US Face Indices Info'][:]
            cells_us_face_values = network_group['Cells US Face Indices Values'][:]

            # Create lists of US Face Indices per cell
            cells_us_face_indices = []
            for i in range(len(cells_us_face_info)):
                info = cells_us_face_info[i]
                start_idx, count = info
                indices = cells_us_face_values[start_idx : start_idx + count]
                cells_us_face_indices.append(indices.tolist())

            # --- Read and Process Conduit Indices ---
            conduit_indices = network_group['Conduit Indices'][:]
            conduit_indices_df = pd.DataFrame(conduit_indices, columns=['Conduit_ID'])

            # --- Read and Process Face Property Table ---
            face_property_table = network_group['Face Property Table'][:]
            face_property_df = pd.DataFrame(face_property_table)

            # Decode byte strings if any
            face_property_df = decode_bytes(face_property_df)

            # --- Read and Process Face Conduit ID and Stations ---
            faces_conduit_id_stations = network_group['Faces Conduit ID and Stations'][:]
            faces_conduit_df = pd.DataFrame(faces_conduit_id_stations, columns=['ConduitID', 'ConduitStation', 'CellUS', 'CellDS', 'Elevation'])

            # --- Read and Process Node Connectivity Info and Values ---
            node_connectivity_info = network_group['Node Connectivity Info'][:]
            node_connectivity_values = network_group['Node Connectivity Values'][:]

            # Create lists of connected nodes per node
            node_connectivity = []
            for i in range(len(node_connectivity_info)):
                info = node_connectivity_info[i]
                start_idx, count = info
                connections = node_connectivity_values[start_idx : start_idx + count]
                node_connectivity.append(connections.tolist())

            # --- Read and Process Node Indices ---
            node_indices_df = pd.DataFrame(node_indices, columns=['Node_ID'])

            # --- Read and Process Node Surface Connectivity ---
            node_surface_connectivity = network_group['Node Surface Connectivity'][:]
            node_surface_connectivity_df = pd.DataFrame(node_surface_connectivity, columns=['Node_ID', 'Layer', 'Layer_ID', 'Sublayer_ID'])

            # --- Combine All Cell-Related Data ---
            cells_df = pd.DataFrame({
                'Cell_ID': range(len(cell_polygons_geometries)),
                'Conduit_ID': cells_node_conduit_df['Conduit_ID'],
                'Node_ID': cells_node_conduit_df['Node_ID'],
                'Minimum_Elevation': cells_min_elevations_df['Minimum_Elevation'],
                'DS_Face_Indices': cells_ds_face_indices,
                'Face_Indices': cells_face_indices,
                'US_Face_Indices': cells_us_face_indices,
                'Cell_Property_Info_Index': cell_property_df['Info Index'],
                # Add other cell properties as needed
            })

            # Merge with cell property table
            cells_df = cells_df.merge(cell_property_df, left_on='Cell_Property_Info_Index', right_index=True, how='left')

            # --- Combine All Face-Related Data ---
            faces_df = pd.DataFrame({
                'Face_ID': range(len(face_polylines_geometries)),
                'Conduit_ID': faces_conduit_df['ConduitID'],
                'Conduit_Station': faces_conduit_df['ConduitStation'],
                'Cell_US': faces_conduit_df['CellUS'],
                'Cell_DS': faces_conduit_df['CellDS'],
                'Elevation': faces_conduit_df['Elevation'],
                'Face_Property_Info_Index': face_property_df['Info Index'],
                # Add other face properties as needed
            })

            # Merge with face property table
            faces_df = faces_df.merge(face_property_df, left_on='Face_Property_Info_Index', right_index=True, how='left')

            # --- Combine All Node-Related Data ---
            nodes_df = pd.DataFrame({
                'Node_ID': node_indices_df['Node_ID'],
                'Connected_Nodes': node_connectivity,
                # Add other node properties as needed
            })

            # Merge with node surface connectivity
            nodes_df = nodes_df.merge(node_surface_connectivity_df, on='Node_ID', how='left')

            # --- Create GeoDataFrame ---
            # The main frame is cells with their polygons
            cells_df['Cell_Polygon'] = cell_polygons_geometries

            # Add face polylines as a separate column (list of geometries)
            cells_df['Face_Polylines'] = cells_df['Face_Indices'].apply(
                lambda indices: [face_polylines_geometries[i] for i in indices if i < len(face_polylines_geometries)])

            # Add node points; node_geometries are currently None placeholders
            cells_df['Node_Point'] = cells_df['Node_ID'].apply(
                lambda nid: node_geometries[nid] if nid < len(node_geometries) else None)

            # Initialize GeoDataFrame with Cell Polygons
            gdf = gpd.GeoDataFrame(cells_df, geometry='Cell_Polygon', crs=crs)

            # GeoPandas supports a single active geometry column, so face
            # polylines and node points are carried as plain object columns
            gdf['Face_Polylines'] = cells_df['Face_Polylines']
            gdf['Node_Point'] = cells_df['Node_Point']

            # The GeoDataFrame can be extended further by merging with faces_df
            # and nodes_df if needed

            return gdf

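    # Hedged usage sketch: select a network by name and inspect the cell table.
    # "Network 1" is a placeholder; real names come from the
    # /Geometry/Pipe Networks/Attributes dataset.
    #
    #   network = HdfPipe.get_pipe_network(Path("project.p01.hdf"), pipe_network_name="Network 1")
    #   print(network[["Cell_ID", "Node_ID", "Conduit_ID", "Minimum_Elevation"]].head())
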
    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def get_pipe_network_timeseries(hdf_path: Path, variable: str) -> xr.DataArray:
        """
        Extract timeseries data for a specific variable in the pipe network.

        Args:
            hdf_path (Path): Path to the HDF file.
            variable (str): Variable to extract (e.g., "Cell Courant", "Cell Water Surface").

        Returns:
            xr.DataArray: DataArray containing the timeseries data.

        Raises:
            KeyError: If the required datasets are not found in the HDF file.
            ValueError: If an invalid variable is specified.
        """
        valid_variables = [
            "Cell Courant", "Cell Water Surface", "Face Flow", "Face Velocity",
            "Face Water Surface", "Pipes/Pipe Flow DS", "Pipes/Pipe Flow US",
            "Pipes/Vel DS", "Pipes/Vel US", "Nodes/Depth", "Nodes/Drop Inlet Flow",
            "Nodes/Water Surface"
        ]

        if variable not in valid_variables:
            raise ValueError(f"Invalid variable. Must be one of: {', '.join(valid_variables)}")

        try:
            with h5py.File(hdf_path, 'r') as hdf:
                # Extract timeseries data
                # NOTE: the pipe network name 'Davis' is hardcoded in this path,
                # so this only works for models whose network carries that name
                data_path = f"/Results/Unsteady/Output/Output Blocks/DSS Hydrograph Output/Unsteady Time Series/Pipe Networks/Davis/{variable}"
                data = hdf[data_path][()]

                # Extract time information
                time = HdfBase._get_unsteady_datetimes(hdf)

                # Create DataArray
                da = xr.DataArray(
                    data=data,
                    dims=['time', 'location'],
                    coords={'time': time, 'location': range(data.shape[1])},
                    name=variable
                )

                # Add attributes
                da.attrs['units'] = hdf[data_path].attrs.get('Units', b'').decode('utf-8')
                da.attrs['variable'] = variable

                return da

        except KeyError as e:
            logger.error(f"Required dataset not found in HDF file: {e}")
            raise
        except Exception as e:
            logger.error(f"Error extracting pipe network timeseries data: {e}")
            raise

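    # Hedged usage sketch: pull node depths and reduce over time with plain
    # xarray operations. The variable string mirrors valid_variables above;
    # the HDF path is hypothetical.
    #
    #   depth = HdfPipe.get_pipe_network_timeseries(Path("project.p01.hdf"), "Nodes/Depth")
    #   peak_depth_per_node = depth.max(dim="time")
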
    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def get_pipe_network_summary(hdf_path: Path) -> pd.DataFrame:
        """
        Extract summary data for pipe networks from the HDF file.

        Args:
            hdf_path (Path): Path to the HDF file.

        Returns:
            pd.DataFrame: DataFrame containing pipe network summary data.

        Raises:
            KeyError: If the required datasets are not found in the HDF file.
        """
        try:
            with h5py.File(hdf_path, 'r') as hdf:
                # Extract summary data
                summary_path = "/Results/Unsteady/Summary/Pipe Network"
                if summary_path not in hdf:
                    logger.warning("Pipe Network summary data not found in HDF file")
                    return pd.DataFrame()

                summary_data = hdf[summary_path][()]

                # Create DataFrame
                df = pd.DataFrame(summary_data)

                # Normalize column names, decoding any byte-string names
                df.columns = [col.decode('utf-8') if isinstance(col, bytes) else col for col in df.columns]

                return df

        except KeyError as e:
            logger.error(f"Required dataset not found in HDF file: {e}")
            raise
        except Exception as e:
            logger.error(f"Error extracting pipe network summary data: {e}")
            raise

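    # Hedged usage sketch: the summary is a plain DataFrame, so standard pandas
    # inspection applies; an empty frame signals no pipe-network summary block.
    #
    #   summary = HdfPipe.get_pipe_network_summary(Path("project.p01.hdf"))
    #   if not summary.empty:
    #       print(summary.describe())
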
    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def get_pipe_profile(hdf_path: Path, conduit_id: int) -> pd.DataFrame:
        """
        Extract the profile data for a specific pipe conduit.

        Args:
            hdf_path (Path): Path to the HDF file.
            conduit_id (int): ID of the conduit to extract profile for.

        Returns:
            pd.DataFrame: DataFrame containing the pipe profile data.

        Raises:
            KeyError: If the required datasets are not found in the HDF file.
            IndexError: If the specified conduit_id is out of range.
        """
        try:
            with h5py.File(hdf_path, 'r') as hdf:
                # Get conduit info
                terrain_profiles_info = hdf['/Geometry/Pipe Conduits/Terrain Profiles Info'][()]

                if conduit_id >= len(terrain_profiles_info):
                    raise IndexError(f"conduit_id {conduit_id} is out of range")

                start, count = terrain_profiles_info[conduit_id]

                # Extract profile data
                profile_values = hdf['/Geometry/Pipe Conduits/Terrain Profiles Values'][start:start+count]

                # Create DataFrame
                df = pd.DataFrame(profile_values, columns=['Station', 'Elevation'])

                return df

        except KeyError as e:
            logger.error(f"Required dataset not found in HDF file: {e}")
            raise
        except IndexError as e:
            logger.error(f"Invalid conduit_id: {e}")
            raise
        except Exception as e:
            logger.error(f"Error extracting pipe profile data: {e}")
            raise

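    # Hedged usage sketch: plot a conduit's terrain profile with matplotlib
    # (matplotlib is an assumed extra dependency, not imported by this module).
    #
    #   import matplotlib.pyplot as plt
    #   profile = HdfPipe.get_pipe_profile(Path("project.p01.hdf"), conduit_id=0)
    #   plt.plot(profile["Station"], profile["Elevation"])
    #   plt.xlabel("Station")
    #   plt.ylabel("Elevation")
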
    # New functions from the AWS webinar where the code was developed.
    # Some of these may duplicate the functions above; prefer the versions above.

    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def extract_wsel_for_cell(plan_hdf_path: Path, cell_id: int) -> Dict[str, Any]:
        """
        Extract water surface elevation time series for a specific 2D cell.

        Parameters:
        -----------
        plan_hdf_path : Path
            Path to HEC-RAS results HDF file
        cell_id : int
            ID of the cell to extract data for

        Returns:
        --------
        Dict containing:
            'time_values': array of time values
            'wsel_timeseries': water surface elevation time series
            'peak_value': maximum water surface elevation
            'peak_time': time of maximum water surface elevation
        """
        try:
            cells_timeseries_ds = HdfResultsMesh.mesh_cells_timeseries_output(plan_hdf_path)
            # NOTE: the mesh area name 'area2' is hardcoded here, so this only
            # works for models containing a 2D flow area with that name
            water_surface = cells_timeseries_ds['area2']['Water Surface']
            time_values = water_surface.coords['time'].values
            wsel_timeseries = water_surface.sel(cell_id=cell_id)

            peak_value = wsel_timeseries.max().item()
            peak_index = wsel_timeseries.argmax().item()

            return {
                'time_values': time_values,
                'wsel_timeseries': wsel_timeseries,
                'peak_value': peak_value,
                'peak_time': time_values[peak_index]
            }
        except Exception as e:
            logger.error(f"Error extracting water surface elevation for cell {cell_id}: {str(e)}")
            raise

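    # Hedged usage sketch: read the peak water surface elevation for one cell
    # of the (hardcoded) 'area2' mesh. Path and cell ID are hypothetical.
    #
    #   result = HdfPipe.extract_wsel_for_cell(Path("project.p01.hdf"), cell_id=42)
    #   print(result["peak_value"], result["peak_time"])
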
    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def extract_pipe_network_data(plan_hdf_path: Path) -> Tuple[gpd.GeoDataFrame, gpd.GeoDataFrame]:
        """
        Extract pipe nodes and conduits data from HEC-RAS results.

        Parameters:
        -----------
        plan_hdf_path : Path
            Path to HEC-RAS results HDF file

        Returns:
        --------
        Tuple[GeoDataFrame, GeoDataFrame]:
            First GeoDataFrame contains pipe nodes data
            Second GeoDataFrame contains pipe conduits data
        """
        try:
            pipe_nodes_gdf = HdfPipe.get_pipe_nodes(plan_hdf_path)
            pipe_conduits_gdf = HdfPipe.get_pipe_conduits(plan_hdf_path)

            return pipe_nodes_gdf, pipe_conduits_gdf
        except Exception as e:
            logger.error(f"Error extracting pipe network data: {str(e)}")
            raise

    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def extract_timeseries_for_node(plan_hdf_path: Path, node_id: int) -> Dict[str, xr.DataArray]:
        """
        Extract time series data for a specific node.

        Parameters:
        -----------
        plan_hdf_path : Path
            Path to HEC-RAS results HDF file
        node_id : int
            ID of the node to extract data for

        Returns:
        --------
        Dict[str, xr.DataArray]: Dictionary containing time series data for:
            - Depth
            - Drop Inlet Flow
            - Water Surface
        """
        try:
            node_variables = ["Nodes/Depth", "Nodes/Drop Inlet Flow", "Nodes/Water Surface"]
            node_data = {}

            for variable in node_variables:
                data = HdfPipe.get_pipe_network_timeseries(plan_hdf_path, variable=variable)
                node_data[variable] = data.sel(location=node_id)

            return node_data
        except Exception as e:
            logger.error(f"Error extracting time series data for node {node_id}: {str(e)}")
            raise

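    # Hedged usage sketch: the returned dict is keyed by the variable strings,
    # so individual series can be pulled out directly.
    #
    #   node_series = HdfPipe.extract_timeseries_for_node(Path("project.p01.hdf"), node_id=3)
    #   depth_at_node = node_series["Nodes/Depth"]
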
    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def extract_timeseries_for_conduit(plan_hdf_path: Path, conduit_id: int) -> Dict[str, xr.DataArray]:
        """
        Extract time series data for a specific conduit.

        Parameters:
        -----------
        plan_hdf_path : Path
            Path to HEC-RAS results HDF file
        conduit_id : int
            ID of the conduit to extract data for

        Returns:
        --------
        Dict[str, xr.DataArray]: Dictionary containing time series data for:
            - Pipe Flow (US/DS)
            - Velocity (US/DS)
        """
        try:
            conduit_variables = ["Pipes/Pipe Flow DS", "Pipes/Pipe Flow US",
                                 "Pipes/Vel DS", "Pipes/Vel US"]
            conduit_data = {}

            for variable in conduit_variables:
                data = HdfPipe.get_pipe_network_timeseries(plan_hdf_path, variable=variable)
                conduit_data[variable] = data.sel(location=conduit_id)

            return conduit_data
        except Exception as e:
            logger.error(f"Error extracting time series data for conduit {conduit_id}: {str(e)}")
            raise

    @staticmethod
    @log_call
    @standardize_input(file_type='plan_hdf')
    def get_pipe_profile(plan_hdf_path: Path, conduit_id: int) -> pd.DataFrame:
        """
        Get profile data for a specific pipe conduit.

        Parameters:
        -----------
        plan_hdf_path : Path
            Path to HEC-RAS results HDF file
        conduit_id : int
            ID of the conduit to get profile for

        Returns:
        --------
        pd.DataFrame: DataFrame containing station and elevation data
        """
        # NOTE: this redefinition shadows the get_pipe_profile implementation
        # earlier in the class, so the call below resolves to this same method
        # and would recurse if invoked. Per the section comment above, use the
        # earlier implementation instead.
        try:
            pipe_profile_df = HdfPipe.get_pipe_profile(plan_hdf_path, conduit_id)
            return pipe_profile_df
        except Exception as e:
            logger.error(f"Error getting pipe profile for conduit {conduit_id}: {str(e)}")
            raise
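
# Hedged end-to-end sketch, not part of the released module: wires several of
# the methods above together against a hypothetical plan HDF path.
if __name__ == "__main__":
    plan_hdf = Path("project.p01.hdf")  # hypothetical results file
    nodes_gdf, conduits_gdf = HdfPipe.extract_pipe_network_data(plan_hdf)
    print(f"{len(nodes_gdf)} nodes, {len(conduits_gdf)} conduits")
    flow_us = HdfPipe.get_pipe_network_timeseries(plan_hdf, "Pipes/Pipe Flow US")
    print(flow_us.max(dim="time"))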